diff --git a/.gitignore b/.gitignore index 09c56e81..d28597e3 100644 --- a/.gitignore +++ b/.gitignore @@ -34,5 +34,18 @@ obj/ _ReSharper*/ [Tt]est[Rr]esult* App_Data/ +.vs/ +.vscode/ +*.lock.json +*.nuget.props +*.nuget.targets + NuGet/ -build/ \ No newline at end of file +NuGet.Signed/ +packages/ + +build/ +tests/ServiceStack.Redis.Tests/azureconfig.txt +*.rdb +*.dat +*.rdb \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..4d7b5ce3 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,16 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": ".NET Core Launch (console)", + "type": "coreclr", + "request": "launch", + "preLaunchTask": "build", + "program": "${workspaceRoot}/tests/ServiceStack.Redis.Tests/bin/Debug/netcoreapp1.0/ServiceStack.Redis.Tests.dll", + "args": [], + "cwd": "${workspaceRoot}", + "stopAtEntry": false, + "externalConsole": false + } + ] +} \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 00000000..d5b6d366 --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,20 @@ +{ + // See https://go.microsoft.com/fwlink/?LinkId=733558 + // for the documentation about the tasks.json format + "version": "2.0.0", + "command": "dotnet", + "args": [], + "tasks": [ + { + "label": "build", + "type": "shell", + "command": "dotnet", + "args": [ + "build", + "tests/ServiceStack.Redis.Tests" + ], + "problemMatcher": "$msCompile", + "group": "build" + } + ] +} \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..9cbc9721 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,5 @@ +## Contributor License Agreement + +* Please sign the [Contributor License Agreement](https://docs.google.com/forms/d/16Op0fmKaqYtxGL4sg7w_g-cXXyCoWjzppgkuqzOeKyk/viewform) in order to have your changes merged. + +See the [Contributing Wiki](https://github.com/ServiceStack/ServiceStack/wiki/Contributing) to learn how you can Contribute! diff --git a/LICENSE b/LICENSE deleted file mode 100644 index c5e98c93..00000000 --- a/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2007-2011, Demis Bellot, ServiceStack. -http://www.servicestack.net -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the ServiceStack nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/NuGet.Config b/NuGet.Config new file mode 100644 index 00000000..42daf5f4 --- /dev/null +++ b/NuGet.Config @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/NuGet/lib/net35/ServiceStack.Redis.XML b/NuGet/lib/net35/ServiceStack.Redis.XML deleted file mode 100644 index 19efd208..00000000 --- a/NuGet/lib/net35/ServiceStack.Redis.XML +++ /dev/null @@ -1,1355 +0,0 @@ - - - - ServiceStack.Redis - - - - - Provides thread-safe retrievel of redis clients since each client is a new one. - Allows the configuration of different ReadWrite and ReadOnly hosts - - - BasicRedisClientManager for ICacheClient - - For more interoperabilty I'm also implementing the ICacheClient on - this cache client manager which has the affect of calling - GetCacheClient() for all write operations and GetReadOnlyCacheClient() - for the read ones. - - This works well for master-slave replication scenarios where you have - 1 master that replicates to multiple read slaves. - - - - - Hosts can be an IP Address or Hostname in the format: host[:port] - e.g. 127.0.0.1:6379 - default is: localhost:6379 - - The write hosts. - The read hosts. - - - - Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - - - - - - Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - - - - - - Gets or sets object key prefix. - - - - - Courtesy of @marcgravell - http://code.google.com/p/protobuf-net/source/browse/trunk/protobuf-net/BufferPool.cs - - - - - A complete redis command, with method to send command, receive response, and run callback on success or failure - - - - - Allows you to get Redis value operations to operate against POCO types. - - - - - - Use this to share the same redis connection with another - - The client. - - - - Pipeline for redis typed client - - - - - - Queue of commands for redis typed client - - - - - - Redis operation (transaction/pipeline) that allows queued commands to be completed - - - - - Creates an MQ Host that processes all messages on a single background thread. - i.e. If you register 3 handlers it will only create 1 background thread. - - The same background thread that listens to the Redis MQ Subscription for new messages - also cycles through each registered handler processing all pending messages one-at-a-time: - first in the message PriorityQ, then in the normal message InQ. - - The Start/Stop methods are idempotent i.e. It's safe to call them repeatedly on multiple threads - and the Redis MQ Host will only have Started/Stopped once. - - - - - Inject your own Reply Client Factory to handle custom Message.ReplyTo urls. - - - - - Creates a Redis MQ Server that processes each message on its own background thread. - i.e. 
if you register 3 handlers it will create 7 background threads: - - 1 listening to the Redis MQ Subscription, getting notified of each new message - - 3x1 Normal InQ for each message handler - - 3x1 PriorityQ for each message handler - - When RedisMqServer Starts it creates a background thread subscribed to the Redis MQ Topic that - listens for new incoming messages. It also starts 2 background threads for each message type: - - 1 for processing the services Priority Queue and 1 processing the services normal Inbox Queue. - - Priority Queue's can be enabled on a message-per-message basis by specifying types in the - OnlyEnablePriortyQueuesForTypes property. The DisableAllPriorityQueues property disables all Queues. - - The Start/Stop methods are idempotent i.e. It's safe to call them repeatedly on multiple threads - and the Redis MQ Server will only have Started or Stopped once. - - - - - Execute global transformation or custom logic before a request is processed. - Must be thread-safe. - - - - - Execute global transformation or custom logic on the response. - Must be thread-safe. - - - - - Execute global error handler logic. Must be thread-safe. - - - - - If you only want to enable priority queue handlers (and threads) for specific msg types - - - - - Don't listen on any Priority Queues - - - - - Ignore dispose on RedisClientsManager, which should be registered as a singleton - - - - - Useful wrapper IRedisClientsManager to cut down the boiler plat of most IRedisClient access - - - - - A complete redis command, with method to send command, receive response, and run callback on success or failure - - - - - Redis command that does not get queued - - - - - The client wraps the native redis operations into a more readable c# API. - - Where possible these operations are also exposed in common c# interfaces, - e.g. RedisClient.Lists => IList[string] - RedisClient.Sets => ICollection[string] - - - - - This class contains all the common operations for the RedisClient. - The client contains a 1:1 mapping of c# methods to redis operations of the same name. - - Not threadsafe use a pooled manager - - - - - Requires custom result parsing - - Number of results - - - - Command to set multuple binary safe arguments - - - - - - - reset buffer index in send buffer - - - - - Used to manage connection pooling - - - - - Gets or sets object key prefix. - - - - - Creates a new instance of the Redis Client from NewFactoryFn. - - - - - Returns key with automatic object id detection in provided value with generic type. - - - - - - - Returns key with explicit object id. - - - - - - - Returns key with explicit object type and id. - - - - - - - - Provides a redis connection pool that can be sharded - - - - - For interoperabilty GetCacheClient() and GetReadOnlyCacheClient() - return an ICacheClient wrapper around the redis manager which has the affect of calling - GetClient() for all write operations and GetReadOnlyClient() for the read ones. - - This works well for master-slave replication scenarios where you have - 1 master that replicates to multiple read slaves. - - - Provides thread-safe pooling of redis client connections. - Allows load-balancing of master-write and read-slave hosts, ideal for - 1 master and multiple replicated read slaves. - - - - - Hosts can be an IP Address or Hostname in the format: host[:port] - e.g. 127.0.0.1:6379 - default is: localhost:6379 - - The write hosts. - The read hosts. - The config. 
- - - - Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - - - - - - Called within a lock - - - - - - Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - - - - - - Called within a lock - - - - - - Disposes the read only client. - - The client. - - - - Disposes the write client. - - The client. - - - - Gets or sets object key prefix. - - - - - Manage a client acquired from the PooledRedisClientManager - Dispose method will release the client back to the pool. - - - - - wrap the acquired client - - - - - - release the wrapped client back to the pool - - - - - access the wrapped client - - - - - logical name - - - - - An arbitrary weight relative to other nodes - - - - logical name - An arbitrary weight relative to other nodes - redis nodes - - - - Provides sharding of redis client connections. - uses consistent hashing to distribute keys across connection pools - - - - - maps a key to a redis connection pool - - key to map - a redis connection pool - - - - Adds a node and maps points across the circle - - node to add - An arbitrary number, specifies how often it occurs relative to other targets. - - - - A variation of Binary Search algorithm. Given a number, matches the next highest number from the sorted array. - If a higher number does not exist, then the first number in the array is returned. - - a sorted array to perform the search - number to find the next highest number against - next highest number - - - - Given a key, generates an unsigned 64 bit hash code using MD5 - - - - - - - Distributed lock interface - - - - - Optimized implementation. Primitive types are manually serialized, the rest are serialized using binary serializer />. - - - - - serialize/deserialize arbitrary objects - (objects must be serializable) - - - - - Serialize object to buffer - - serializable object - - - - - Deserialize buffer to object - - byte array to deserialize - - - - - - - - - - - - - - - - - - - serialize value and wrap with - - - - - - - Unwrap object wrapped in - - - - - - - pop numProcessed items from queue and unlock queue for work item id that dequeued - items are associated with - - - - - - A dequeued work item has been processed. When all of the dequeued items have been processed, - all items will be popped from the queue,and the queue unlocked for the work item id that - the dequeued items are associated with - - - - - Update first unprocessed item with new work item. - - - - - - - - - - - distributed work item queue. Each message must have an associated - work item id. For a given id, all work items are guaranteed to be processed - in the order in which they are received. - - - - - distributed work item queue. Each message must have an associated - work item id. For a given id, all work items are guaranteed to be processed - in the order in which they are received. - - - - - - - distributed work item queue - - - - - Enqueue item in priority queue corresponding to workItemId identifier - - - - - - - Preprare next work item id for dequeueing - - - - - Dequeue up to maxBatchSize items from queue corresponding to workItemId identifier. - Once this method is called, or will not - return any items for workItemId until the dequeue lock returned is unlocked. 
- - - - - - - - Replace existing work item in workItemId queue - - - - - - - - Queue incoming messages - - - - - - - Must call this periodically to move work items from priority queue to pending queue - - - - - Replace existing work item in workItemId queue - - - - - - - - Pop items from list - - - - - - - Force release of locks held by crashed servers - - - - - release lock held by crashed server - - - - true if lock is released, either by this method or by another client; false otherwise - - - - Unlock work item id, so other servers can process items for this id - - - - - - - - - - - - - - - - - - - - - - - - - - - pop remaining items that were returned by dequeue, and unlock queue - - - - - - indicate that an item has been processed by the caller - - - - - Update first unprocessed work item - - - - - - wraps a serialized representation of an object - - - - - - Initializes a new instance of . - - Custom item data. - The serialized item. - - - - The data representing the item being stored/retireved. - - - - - Flags set for this instance. - - - - - distributed lock class that follows the Resource Allocation Is Initialization pattern - - - - - Lock - - - - in seconds - in seconds - - - - unlock - - - - - Enqueue item - - - - - - Dequeue up to maxBatchSize items from queue - - - - - - - distributed work item queue. Messages are processed in chronological order - - - - - Enqueue incoming messages - - - - - - - - Dequeue next batch of work items - - - - - - - - - simple distributed work item queue - - - - - - - Queue incoming messages - - - - - - Dequeue next batch of work items for processing. After this method is called, - no other work items with same id will be available for - dequeuing until PostDequeue is called - - KeyValuePair: key is work item id, and value is list of dequeued items. - - - - - Serialize object to buffer - - serializable object - - - - - - - array of serializable objects - - - - - Deserialize buffer to object - - byte array to deserialize - - - - - - customize the client serializer - - - - - Factory to create SerializingRedisClient objects - - - - - - - - - General purpose pipeline - - - - - - Flush send buffer, and read responses - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). - - - - - Put "QUEUED" messages at back of queue - - - - - - Issue exec command (not queued) - - - - - callback for after result count is read in - - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Transient message queues are a one-pass message queue service that starts - processing messages when Start() is called. Any subsequent Start() calls - while the service is running is ignored. - - The transient service will continue to run until all messages have been - processed after which time it will shutdown all processing until Start() is called again. - - - - - Redis-specific exception. Thrown if unable to connect to Redis server due to socket exception, for example. - - - - - Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). 
- - - - - Put "QUEUED" messages at back of queue - - - - - - Issue exec command (not queued) - - - - - callback for after result count is read in - - - - - - Provide the default factory implementation for creating a RedisClient that - can be mocked and used by different 'Redis Client Managers' - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Provide the factory implementation for creating a RedisCacheClient that - can be mocked and used by different 'Redis Client Managers' - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Wrap the common redis list operations under a IList[string] interface. - - - - - Represents a generic collection of key/value pairs that are ordered independently of the key and value. - - The type of the keys in the dictionary - The type of the values in the dictionary - - - - Adds an entry with the specified key and value into the IOrderedDictionary<TKey,TValue> collection with the lowest available index. - - The key of the entry to add. - The value of the entry to add. - The index of the newly added entry - - You can also use the property to add new elements by setting the value of a key that does not exist in the IOrderedDictionary<TKey,TValue> collection; however, if the specified key already exists in the IOrderedDictionary<TKey,TValue>, setting the property overwrites the old value. In contrast, the method does not modify existing elements. - An element with the same key already exists in the IOrderedDictionary<TKey,TValue> - The IOrderedDictionary<TKey,TValue> is read-only.
- -or-
- The IOrderedDictionary<TKey,TValue> has a fixed size.
-
- - - Inserts a new entry into the IOrderedDictionary<TKey,TValue> collection with the specified key and value at the specified index. - - The zero-based index at which the element should be inserted. - The key of the entry to add. - The value of the entry to add. The value can be if the type of the values in the dictionary is a reference type. - is less than 0.
- -or-
- is greater than .
- An element with the same key already exists in the IOrderedDictionary<TKey,TValue>. - The IOrderedDictionary<TKey,TValue> is read-only.
- -or-
- The IOrderedDictionary<TKey,TValue> has a fixed size.
-
- - - Gets or sets the value at the specified index. - - The zero-based index of the value to get or set. - The value of the item at the specified index. - is less than 0.
- -or-
- is equal to or greater than .
-
- - - Represents a generic collection of key/value pairs that are ordered independently of the key and value. - - The type of the keys in the dictionary - The type of the values in the dictionary - - - - Initializes a new instance of the OrderedDictionary<TKey,TValue> class. - - - - - Initializes a new instance of the OrderedDictionary<TKey,TValue> class using the specified initial capacity. - - The initial number of elements that the OrderedDictionary<TKey,TValue> can contain. - is less than 0 - - - - Initializes a new instance of the OrderedDictionary<TKey,TValue> class using the specified comparer. - - The IEqualityComparer<TKey> to use when comparing keys, or to use the default EqualityComparer<TKey> for the type of the key. - - - - Initializes a new instance of the OrderedDictionary<TKey,TValue> class using the specified initial capacity and comparer. - - The initial number of elements that the OrderedDictionary<TKey,TValue> collection can contain. - The IEqualityComparer<TKey> to use when comparing keys, or to use the default EqualityComparer<TKey> for the type of the key. - is less than 0 - - - - Converts the object passed as a key to the key type of the dictionary - - The key object to check - The key object, cast as the key type of the dictionary - is . - The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . - - - - Converts the object passed as a value to the value type of the dictionary - - The object to convert to the value type of the dictionary - The value object, converted to the value type of the dictionary - is , and the value type of the OrderedDictionary<TKey,TValue> is a value type. - The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . - - - - Inserts a new entry into the OrderedDictionary<TKey,TValue> collection with the specified key and value at the specified index. - - The zero-based index at which the element should be inserted. - The key of the entry to add. - The value of the entry to add. The value can be if the type of the values in the dictionary is a reference type. - is less than 0.
- -or-
- is greater than .
- is . - An element with the same key already exists in the OrderedDictionary<TKey,TValue>. -
- - - Inserts a new entry into the OrderedDictionary<TKey,TValue> collection with the specified key and value at the specified index. - - The zero-based index at which the element should be inserted. - The key of the entry to add. - The value of the entry to add. The value can be if the type of the values in the dictionary is a reference type. - is less than 0.
- -or-
- is greater than .
- is .
- -or-
- is , and the value type of the OrderedDictionary<TKey,TValue> is a value type.
- The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
- -or-
- The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
- -or-
- An element with the same key already exists in the OrderedDictionary<TKey,TValue>.
-
- - - Removes the entry at the specified index from the OrderedDictionary<TKey,TValue> collection. - - The zero-based index of the entry to remove. - is less than 0.
- -or-
- index is equal to or greater than .
-
- - - Adds an entry with the specified key and value into the OrderedDictionary<TKey,TValue> collection with the lowest available index. - - The key of the entry to add. - The value of the entry to add. This value can be . - A key cannot be , but a value can be. - You can also use the property to add new elements by setting the value of a key that does not exist in the OrderedDictionary<TKey,TValue> collection; however, if the specified key already exists in the OrderedDictionary<TKey,TValue>, setting the property overwrites the old value. In contrast, the method does not modify existing elements. - is - An element with the same key already exists in the OrderedDictionary<TKey,TValue> - - - - Adds an entry with the specified key and value into the OrderedDictionary<TKey,TValue> collection with the lowest available index. - - The key of the entry to add. - The value of the entry to add. This value can be . - The index of the newly added entry - A key cannot be , but a value can be. - You can also use the property to add new elements by setting the value of a key that does not exist in the OrderedDictionary<TKey,TValue> collection; however, if the specified key already exists in the OrderedDictionary<TKey,TValue>, setting the property overwrites the old value. In contrast, the method does not modify existing elements. - is - An element with the same key already exists in the OrderedDictionary<TKey,TValue> - - - - Adds an entry with the specified key and value into the OrderedDictionary<TKey,TValue> collection with the lowest available index. - - The key of the entry to add. - The value of the entry to add. This value can be . - is .
- -or-
- is , and the value type of the OrderedDictionary<TKey,TValue> is a value type.
- The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
- -or-
- The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
-
- - - Removes all elements from the OrderedDictionary<TKey,TValue> collection. - - The capacity is not changed as a result of calling this method. - - - - Determines whether the OrderedDictionary<TKey,TValue> collection contains a specific key. - - The key to locate in the OrderedDictionary<TKey,TValue> collection. - if the OrderedDictionary<TKey,TValue> collection contains an element with the specified key; otherwise, . - is - - - - Determines whether the OrderedDictionary<TKey,TValue> collection contains a specific key. - - The key to locate in the OrderedDictionary<TKey,TValue> collection. - if the OrderedDictionary<TKey,TValue> collection contains an element with the specified key; otherwise, . - is - The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . - - - - Returns the zero-based index of the specified key in the OrderedDictionary<TKey,TValue> - - The key to locate in the OrderedDictionary<TKey,TValue> - The zero-based index of , if is found in the OrderedDictionary<TKey,TValue>; otherwise, -1 - This method performs a linear search; therefore it has a cost of O(n) at worst. - - - - Removes the entry with the specified key from the OrderedDictionary<TKey,TValue> collection. - - The key of the entry to remove - if the key was found and the corresponding element was removed; otherwise, - - - - Removes the entry with the specified key from the OrderedDictionary<TKey,TValue> collection. - - The key of the entry to remove - - - - Copies the elements of the OrderedDictionary<TKey,TValue> elements to a one-dimensional Array object at the specified index. - - The one-dimensional object that is the destination of the objects copied from the OrderedDictionary<TKey,TValue>. The must have zero-based indexing. - The zero-based index in at which copying begins. - The method preserves the order of the elements in the OrderedDictionary<TKey,TValue> - - - - Gets the value associated with the specified key. - - The key of the value to get. - When this method returns, contains the value associated with the specified key, if the key is found; otherwise, the default value for the type of . This parameter can be passed uninitialized. - if the OrderedDictionary<TKey,TValue> contains an element with the specified key; otherwise, . - - - - Adds the specified value to the OrderedDictionary<TKey,TValue> with the specified key. - - The KeyValuePair<TKey,TValue> structure representing the key and value to add to the OrderedDictionary<TKey,TValue>. - - - - Determines whether the OrderedDictionary<TKey,TValue> contains a specific key and value. - - The KeyValuePair<TKey,TValue> structure to locate in the OrderedDictionary<TKey,TValue>. - if is found in the OrderedDictionary<TKey,TValue>; otherwise, . - - - - Copies the elements of the OrderedDictionary<TKey,TValue> to an array of type , starting at the specified index. - - The one-dimensional array of type KeyValuePair<TKey,TValue> that is the destination of the KeyValuePair<TKey,TValue> elements copied from the OrderedDictionary<TKey,TValue>. The array must have zero-based indexing. - The zero-based index in at which copying begins. - - - - Removes a key and value from the dictionary. - - The KeyValuePair<TKey,TValue> structure representing the key and value to remove from the OrderedDictionary<TKey,TValue>. - if the key and value represented by is successfully found and removed; otherwise, . This method returns if is not found in the OrderedDictionary<TKey,TValue>. 
- - - - Gets the dictionary object that stores the keys and values - - The dictionary object that stores the keys and values for the OrderedDictionary<TKey,TValue> - Accessing this property will create the dictionary object if necessary - - - - Gets the list object that stores the key/value pairs. - - The list object that stores the key/value pairs for the OrderedDictionary<TKey,TValue> - Accessing this property will create the list object if necessary. - - - - Gets or sets the value at the specified index. - - The zero-based index of the value to get or set. - The value of the item at the specified index. - is less than 0.
- -or-
- index is equal to or greater than .
-
- - - Gets or sets the value at the specified index. - - The zero-based index of the value to get or set. - The value of the item at the specified index. - is less than 0.
- -or-
- index is equal to or greater than .
- is a null reference, and the value type of the OrderedDictionary<TKey,TValue> is a value type. - The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . -
- - - Gets a value indicating whether the OrderedDictionary<TKey,TValue> has a fixed size. - - if the OrderedDictionary<TKey,TValue> has a fixed size; otherwise, . The default is . - - - - Gets a value indicating whether the OrderedDictionary<TKey,TValue> collection is read-only. - - if the OrderedDictionary<TKey,TValue> is read-only; otherwise, . The default is . - - A collection that is read-only does not allow the addition, removal, or modification of elements after the collection is created. - A collection that is read-only is simply a collection with a wrapper that prevents modification of the collection; therefore, if changes are made to the underlying collection, the read-only collection reflects those changes. - - - - - Gets an object containing the keys in the OrderedDictionary<TKey,TValue>. - - An object containing the keys in the OrderedDictionary<TKey,TValue>. - The returned object is not a static copy; instead, the collection refers back to the keys in the original OrderedDictionary<TKey,TValue>. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the key collection. - - - - Gets an object containing the values in the OrderedDictionary<TKey,TValue> collection. - - An object containing the values in the OrderedDictionary<TKey,TValue> collection. - The returned object is not a static copy; instead, the refers back to the values in the original OrderedDictionary<TKey,TValue> collection. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the . - - - - Gets or sets the value with the specified key. - - The key of the value to get or set. - The value associated with the specified key. If the specified key is not found, attempting to get it returns , and attempting to set it creates a new element using the specified key. - - - - Gets or sets the value with the specified key. - - The key of the value to get or set. - The value associated with the specified key. If the specified key is not found, attempting to get it returns , and attempting to set it creates a new element using the specified key. - - - - Gets the number of key/values pairs contained in the OrderedDictionary<TKey,TValue> collection. - - The number of key/value pairs contained in the OrderedDictionary<TKey,TValue> collection. - - - - Gets a value indicating whether access to the OrderedDictionary<TKey,TValue> object is synchronized (thread-safe). - - This method always returns false. - - - - Gets an object that can be used to synchronize access to the OrderedDictionary<TKey,TValue> object. - - An object that can be used to synchronize access to the OrderedDictionary<TKey,TValue> object. - - - - Gets an ICollection<TKey> object containing the keys in the OrderedDictionary<TKey,TValue>. - - An ICollection<TKey> object containing the keys in the OrderedDictionary<TKey,TValue>. - The returned ICollection<TKey> object is not a static copy; instead, the collection refers back to the keys in the original OrderedDictionary<TKey,TValue>. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the key collection. - - - - Gets an ICollection<TValue> object containing the values in the OrderedDictionary<TKey,TValue>. - - An ICollection<TValue> object containing the values in the OrderedDictionary<TKey,TValue>. - The returned ICollection<TKey> object is not a static copy; instead, the collection refers back to the values in the original OrderedDictionary<TKey,TValue>. 
Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the value collection. - - - - acquire distributed, non-reentrant lock on key - - global key for this lock - timeout for acquiring lock - timeout for lock, in seconds (stored as value against lock key) - - - - - - unlock key - - - - - - - - - - - - - Locking strategy interface - - - - - This class manages a read lock for a local readers/writer lock, - using the Resource Acquisition Is Initialization pattern - - - - - RAII initialization - - - - - - RAII disposal - - - - - This class manages a write lock for a local readers/writer lock, - using the Resource Acquisition Is Initialization pattern - - - - - - RAII disposal - - - - - manages a "region" in the redis key space - namespace can be cleared by incrementing the generation - - - - - get current generation - - - - - - set new generation - - - - - - redis key for generation - - - - - - get redis key that holds all namespace keys - - - - - - get global cache key - - - - - - - get global key inside of this namespace - - - prefixes can be added for name deconfliction - - - - - replace UniqueCharacter with its double, to avoid name clash - - - - - - - - - - - - - - get locking strategy - - -
-
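The distributed lock documented above follows the Resource Acquisition Is Initialization pattern: the lock is taken when the object is created and released when it is disposed. A minimal sketch of how such a lock is typically used, assuming the `IRedisClient.AcquireLock` API exposed by this library (the key name and timeout values are illustrative only):

```csharp
using System;
using ServiceStack.Redis;

class DistributedLockExample
{
    static void Main()
    {
        // assumes a redis-server reachable on localhost:6379
        using (var redis = new RedisClient("localhost", 6379))
        // wait up to 30s to acquire the lock; the lock key itself stores its expiry
        using (redis.AcquireLock("urn:lock:shared-counter", TimeSpan.FromSeconds(30)))
        {
            // only one client across all app servers runs this section at a time
            var count = redis.Get<int>("shared-counter");
            redis.Set("shared-counter", count + 1);
        }   // lock released here when the IDisposable is disposed
    }
}
```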
diff --git a/NuGet/lib/net35/ServiceStack.Redis.dll b/NuGet/lib/net35/ServiceStack.Redis.dll deleted file mode 100644 index 3e52892c..00000000 Binary files a/NuGet/lib/net35/ServiceStack.Redis.dll and /dev/null differ diff --git a/NuGet/servicestack.redis.nuspec b/NuGet/servicestack.redis.nuspec deleted file mode 100644 index a88e5087..00000000 --- a/NuGet/servicestack.redis.nuspec +++ /dev/null @@ -1,23 +0,0 @@ - - - - ServiceStack.Redis - C# Redis client for the Redis NoSQL DB - 3.9.43 - Demis Bellot - Demis Bellot - - C# Redis Client for the worlds fastest distributed NoSQL datastore. Byte[], String and POCO Typed clients. - Thread-Safe Basic and Pooled client managers included. - - https://github.com/ServiceStack/ServiceStack.Redis - https://github.com/ServiceStack/ServiceStack.Redis/blob/master/LICENSE - http://www.servicestack.net/logo-100x100.png - Redis NoSQL Client Distributed Cache PubSub Messaging Transactions - en-US - ServiceStack 2013 and contributors - - - - - diff --git a/NuGet/stackexpress.redis.nuspec b/NuGet/stackexpress.redis.nuspec deleted file mode 100644 index 2e8ec506..00000000 --- a/NuGet/stackexpress.redis.nuspec +++ /dev/null @@ -1,23 +0,0 @@ - - - - StackExpress.Redis - C# Redis client for the Redis NoSQL DB - $version$ - Demis Bellot - Demis Bellot - - C# Redis Client for the worlds fastest distributed NoSQL datastore. Byte[], String and POCO Typed clients. - Thread-Safe Basic and Pooled client managers included. - - https://github.com/ServiceStack/ServiceStack.Redis - https://github.com/ServiceStack/ServiceStack.Redis/blob/master/LICENSE - http://www.servicestack.net/logo-100x100.png - Redis NoSQL Client Distributed Cache PubSub Messaging Transactions - en-US - StackExpress 2012 and contributors - - - - - diff --git a/README.md b/README.md index ed11f737..cfbbe837 100644 --- a/README.md +++ b/README.md @@ -1,355 +1,5 @@ -[Join the new ServiceStack Google+ group](https://plus.google.com/u/0/communities/112445368900682590445) or -follow [@servicestack](http://twitter.com/servicestack) for twitter updates. +Follow [@ServiceStack](https://twitter.com/servicestack), [view the docs](https://docs.servicestack.net), use [StackOverflow](https://stackoverflow.com/questions/ask?tags=servicestack,servicestack.redis) or [Customer Forums](https://forums.servicestack.net/) for support. 
-# An Open Source C# Client for Redis +# Read ServiceStack.Redis Docs at [docs.servicestack.net/redis](https://docs.servicestack.net/redis/) -## New in v3.9.37 - -### New IRedisClient LUA API's - -The `IRedisClient` API's for [redis server-side LUA support](http://redis.io/commands/eval) have been re-factored into the more user-friendly API's below: - -```csharp -public interface IRedisClient -{ - //Eval/Lua operations - - string ExecLuaAsString(string luaBody, params string[] args); - string ExecLuaAsString(string luaBody, string[] keys, string[] args); - string ExecLuaShaAsString(string sha1, params string[] args); - string ExecLuaShaAsString(string sha1, string[] keys, string[] args); - - int ExecLuaAsInt(string luaBody, params string[] args); - int ExecLuaAsInt(string luaBody, string[] keys, string[] args); - int ExecLuaShaAsInt(string sha1, params string[] args); - int ExecLuaShaAsInt(string sha1, string[] keys, string[] args); - - List ExecLuaAsList(string luaBody, params string[] args); - List ExecLuaAsList(string luaBody, string[] keys, string[] args); - List ExecLuaShaAsList(string sha1, params string[] args); - List ExecLuaShaAsList(string sha1, string[] keys, string[] args); - - string CalculateSha1(string luaBody); - - bool HasLuaScript(string sha1Ref); - Dictionary WhichLuaScriptsExists(params string[] sha1Refs); - void RemoveAllLuaScripts(); - void KillRunningLuaScript(); - string LoadLuaScript(string body); -} -``` - -### Usage Examples - -Here's how you can implement a ZPOP in Lua to remove the items with the lowest rank from a sorted set: - -```csharp -var luaBody = @" - local val = redis.call('zrange', KEYS[1], 0, ARGV[1]-1) - if val then redis.call('zremrangebyrank', KEYS[1], 0, ARGV[1]-1) end - return val"; - -var i = 0; -var alphabet = 26.Times(c => ((char)('A' + c)).ToString()); -alphabet.ForEach(x => Redis.AddItemToSortedSet("zalphabet", x, i++)); - -//Remove the letters with the lowest rank from the sorted set 'zalphabet' -var letters = Redis.ExecLuaAsList(luaBody, keys: new[] { "zalphabet" }, args: new[] { "3" }); -letters.PrintDump(); //[A, B, C] -``` - -And how to implement ZREVPOP to remove items with the highest rank from a sorted set: - -```csharp -var luaBody = @" - local val = redis.call('zrange', KEYS[1], -ARGV[1], -1) - if val then redis.call('zremrangebyrank', KEYS[1], -ARGV[1], -1) end - return val"; - -var i = 0; -var alphabet = 26.Times(c => ((char)('A' + c)).ToString()); -alphabet.ForEach(x => Redis.AddItemToSortedSet("zalphabet", x, i++)); - -//Remove the letters with the highest rank from the sorted set 'zalphabet' -List letters = Redis.ExecLuaAsList(luaBody, keys: new[] { "zalphabet" }, args: new[] { "3" }); - -letters.PrintDump(); //[X, Y, Z] -``` - -### Other examples - -Returning an int: - -```csharp -int intVal = Redis.ExecLuaAsInt("return 123"); //123 -int intVal = Redis.ExecLuaAsInt("return ARGV[1] + ARGV[2]", "10", "20"); //30 -``` - -Returning an string: - -```csharp -var strVal = Redis.ExecLuaAsString(@"return 'Hello, ' .. ARGV[1] .. '!'", "Redis Lua"); //Hello, Redis Lua! -``` - -Returning a List of strings: - -```csharp -Enum.GetNames(typeof(DayOfWeek)).ToList() - .ForEach(x => Redis.AddItemToList("DaysOfWeek", x)); - -var daysOfWeek = Redis.ExecLuaAsList("return redis.call('LRANGE', 'DaysOfWeek', 0, -1)"); -daysOfWeek.PrintDump(); //[Sunday, Monday, Tuesday, ...] 
-``` - -More examples can be found in the [Redis Eval Lua tests](https://github.com/ServiceStack/ServiceStack.Redis/blob/master/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs -) - -## Overview - -*The Redis client is an independent project and can be used with or without the ServiceStack webservices framework.* - -[Redis](http://code.google.com/p/redis/) is one of the fastest and most feature-rich key-value stores to come from the [NoSQL](http://en.wikipedia.org/wiki/NoSQL) movement. -It is similar to memcached but the dataset is not volatile, and values can either be strings lists, sets, sorted sets or hashes. - -[ServiceStack's C# Redis Client](https://github.com/ServiceStack/ServiceStack.Redis) is an Open Source C# Redis client based on [Miguel de Icaza](http://twitter.com/migueldeicaza) previous efforts with [redis-sharp](http://github.com/migueldeicaza/redis-sharp). - -There are a number of different APIs available which are all a friendly drop-in with your local IOC: -The `ServiceStack.Redis.RedisClient` class below implements the following interfaces: - - * [ICacheClient](https://github.com/ServiceStack/ServiceStack/wiki/Caching) - If you are using Redis solely as a cache, you should bind to the [ServiceStack's common interface](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Caching) as there already are In-Memory an Memcached implementations available in ServiceStack, allowing you to easily switch providers in-future. - * [IRedisNativeClient](https://github.com/ServiceStack/ServiceStack.Redis/wiki/IRedisNativeClient) - For those wanting a low-level raw byte access (where you can control your own serialization/deserialization) that map 1:1 with Redis operations of the same name. - -For most cases if you require access to Redis-specific functionality you would want to bind to the interface below: - - * [IRedisClient](https://github.com/ServiceStack/ServiceStack.Redis/wiki/IRedisClient) - Provides a friendlier, more descriptive API that lets you store values as strings (UTF8 encoding). - * [IRedisTypedClient](https://github.com/ServiceStack/ServiceStack.Redis/wiki/IRedisTypedClient) - created with `IRedisClient.GetTypedClient()` - it returns a 'strongly-typed client' that provides a typed-interface for all redis value operations that works against any C#/.NET POCO type. - -The class hierachy for the C# Redis clients effectively look like: - - RedisTypedClient (POCO) > RedisClient (string) > RedisNativeClient (raw byte[]) - -Each client is optimized for maximum efficiency and provides layered functionality for maximum developer productivity: - - * The RedisNativeClient exposes raw **byte[]** apis and does no marshalling and passes all values directly to redis. - * The RedisClient assumes **string** values and simply converts strings to UTF8 bytes before sending to Redis - * The RedisTypedClient provides a generic interface allowing you to add POCO values. The POCO types are serialized using [.NETs fastest JSON Serializer](http://www.servicestack.net/mythz_blog/?p=344) which is then converted to UTF8 bytes and sent to Redis. - -At all times you can pick the most optimal Redis Client for your needs so you can achieve maximum efficiency in your applications. 
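A rough sketch of that layering, assuming the `ICacheClient` `Set`/`Get` methods and the `GetTypedClient<T>()` API referenced in this document (method names can differ between ServiceStack.Redis versions, and the `Shipper` POCO is the one from the example further below):

```csharp
using ServiceStack.Redis;

public class Shipper
{
    public long Id { get; set; }
    public string CompanyName { get; set; }
}

class LayeredClientsExample
{
    static void Main()
    {
        // assumes a redis-server reachable on localhost:6379
        using (IRedisClient redis = new RedisClient("localhost", 6379))
        {
            // string/cache level: values stored as UTF8 strings
            redis.Set("greeting", "hello");               // ICacheClient-style write
            var greeting = redis.Get<string>("greeting"); // "hello"

            // typed POCO level: serialized to JSON/JSV, then stored as UTF8 bytes
            var typedClient = redis.GetTypedClient<Shipper>();
            typedClient.Lists["urn:shippers:current"].Add(
                new Shipper { Id = typedClient.GetNextSequence(), CompanyName = "Trains R Us" });
        }
    }
}
```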
- -### Redis Client API Overview -[![Redis Client API](http://servicestack.net/img/Redis-annotated-preview.png)](http://servicestack.net/img/Redis-annotated.png) - -### Thread-safe client managers -For multi-threaded applications you can choose from our different client connection managers: - - * BasicRedisClientManager - a load-balance (master-write and read-slaves) client manager that returns a new [IRedisClient](https://github.com/ServiceStack/ServiceStack.Redis/wiki/IRedisClient) connection with the defaults specified (faster when accessing a redis-server instance on the same host). - * PooledRedisClientManager - a load-balanced (master-write and read-slaves) client manager that utilizes a pool of redis client connections (faster when accessing a redis-server instance over the network). - -# Download - -You can download the Redis Client in any one of the following ways: - -* Packaged by default in [ServiceStack.dll](https://github.com/ServiceStack/ServiceStack/downloads) -* Available to download separately as a stand-alone [ServiceStack.Redis.dll](https://github.com/ServiceStack/ServiceStack.Redis/downloads) -* As Source Code via Git: `git clone git://github.com/ServiceStack/ServiceStack.Redis.git` -* For those interested in having a GUI admin tool to visualize your Redis data should check out the [Redis Admin UI](http://www.servicestack.net/mythz_blog/?p=381) - -[View the release notes](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Redis-Client-Release-Notes). - -### Redis Server builds for Windows - - * [MS Open Tech - Redis on Windows](https://github.com/MSOpenTech/Redis) - * [Downloads for Cygwin 32bit Redis Server Windows builds](http://code.google.com/p/servicestack/wiki/RedisWindowsDownload). - * [Project that lets you run Redis as a Windows Service](https://github.com/rgl/redis) - * [Another Redis as a Windows Service project, which allows you to run separate service for each Redis instance](https://github.com/kcherenkov/redis-windows-service) - * [Downloads for MinGW 32bit and 64bit Redis Server Windows builds](http://github.com/dmajkic/redis/downloads) - -### Redis Virtual Machines - - * [Run Redis in a Vagrant virtual machine](https://github.com/JasonPunyon/redishobo) - -# Getting Started with the C# Redis client - -###[C# Redis Client wiki](https://github.com/ServiceStack/ServiceStack.Redis/wiki) -Contains all the examples, tutorials and resources you need to get you up to speed with common operations and the latest features. - -[Useful Links on Redis server](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Useful-Redis-Links) - -### Specific Examples - * [Using Transactions in Redis (i.e. 
MULTI/EXEC/DISCARD)](https://github.com/ServiceStack/ServiceStack.Redis/wiki/RedisTransactions) - * [Using Redis's built-in Publsih/Subscribe pattern for high performance network notifications](https://github.com/ServiceStack/ServiceStack.Redis/wiki/RedisPubSub) - * [Using Redis to create high performance *distributed locks* spannable across multiple app servers](https://github.com/ServiceStack/ServiceStack.Redis/wiki/RedisLocks) - -# Simple example using Redis Lists - -Below is a simple example to give you a flavour of how easy it is to use some of Redis's advanced data structures - in this case Redis Lists: -_Full source code of this example is [viewable online](https://github.com/ServiceStack/ServiceStack.Redis/blob/master/tests/ServiceStack.Redis.Tests/ShippersExample.cs)_ - - using (var redisClient = new RedisClient()) - { - //Create a 'strongly-typed' API that makes all Redis Value operations to apply against Shippers - IRedisTypedClient redis = redisClient.GetTypedClient(); - - //Redis lists implement IList while Redis sets implement ICollection - var currentShippers = redis.Lists["urn:shippers:current"]; - var prospectiveShippers = redis.Lists["urn:shippers:prospective"]; - - currentShippers.Add( - new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "Trains R Us", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.Trains, - UniqueRef = Guid.NewGuid() - }); - - currentShippers.Add( - new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "Planes R Us", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.Planes, - UniqueRef = Guid.NewGuid() - }); - - var lameShipper = new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "We do everything!", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.All, - UniqueRef = Guid.NewGuid() - }; - - currentShippers.Add(lameShipper); - - Dump("ADDED 3 SHIPPERS:", currentShippers); - - currentShippers.Remove(lameShipper); - - Dump("REMOVED 1:", currentShippers); - - prospectiveShippers.Add( - new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "Trucks R Us", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.Automobiles, - UniqueRef = Guid.NewGuid() - }); - - Dump("ADDED A PROSPECTIVE SHIPPER:", prospectiveShippers); - - redis.PopAndPushBetweenLists(prospectiveShippers, currentShippers); - - Dump("CURRENT SHIPPERS AFTER POP n' PUSH:", currentShippers); - Dump("PROSPECTIVE SHIPPERS AFTER POP n' PUSH:", prospectiveShippers); - - var poppedShipper = redis.PopFromList(currentShippers); - Dump("POPPED a SHIPPER:", poppedShipper); - Dump("CURRENT SHIPPERS AFTER POP:", currentShippers); - - //reset sequence and delete all lists - redis.SetSequence(0); - redis.Remove(currentShippers, prospectiveShippers); - Dump("DELETING CURRENT AND PROSPECTIVE SHIPPERS:", currentShippers); - } - - /* - == EXAMPLE OUTPUT == - - ADDED 3 SHIPPERS: - Id:1,CompanyName:Trains R Us,ShipperType:Trains,DateCreated:2010-01-31T11:53:37.7169323Z,UniqueRef:d17c5db0415b44b2ac5da7b6ebd780f5 - Id:2,CompanyName:Planes R Us,ShipperType:Planes,DateCreated:2010-01-31T11:53:37.799937Z,UniqueRef:e02a73191f4b4e7a9c44eef5b5965d06 - Id:3,CompanyName:We do everything!,ShipperType:All,DateCreated:2010-01-31T11:53:37.8009371Z,UniqueRef:d0c249bbbaf84da39fc4afde1b34e332 - - REMOVED 1: - Id:1,CompanyName:Trains R Us,ShipperType:Trains,DateCreated:2010-01-31T11:53:37.7169323Z,UniqueRef:d17c5db0415b44b2ac5da7b6ebd780f5 - Id:2,CompanyName:Planes R 
Us,ShipperType:Planes,DateCreated:2010-01-31T11:53:37.799937Z,UniqueRef:e02a73191f4b4e7a9c44eef5b5965d06 - - ADDED A PROSPECTIVE SHIPPER: - Id:4,CompanyName:Trucks R Us,ShipperType:Automobiles,DateCreated:2010-01-31T11:53:37.8539401Z,UniqueRef:67d7d4947ebc4b0ba5c4d42f5d903bec - - CURRENT SHIPPERS AFTER POP n' PUSH: - Id:4,CompanyName:Trucks R Us,ShipperType:Automobiles,DateCreated:2010-01-31T11:53:37.8539401Z,UniqueRef:67d7d4947ebc4b0ba5c4d42f5d903bec - Id:1,CompanyName:Trains R Us,ShipperType:Trains,DateCreated:2010-01-31T11:53:37.7169323Z,UniqueRef:d17c5db0415b44b2ac5da7b6ebd780f5 - Id:2,CompanyName:Planes R Us,ShipperType:Planes,DateCreated:2010-01-31T11:53:37.799937Z,UniqueRef:e02a73191f4b4e7a9c44eef5b5965d06 - - PROSPECTIVE SHIPPERS AFTER POP n' PUSH: - - POPPED a SHIPPER: - Id:2,CompanyName:Planes R Us,ShipperType:Planes,DateCreated:2010-01-31T11:53:37.799937Z,UniqueRef:e02a73191f4b4e7a9c44eef5b5965d06 - - CURRENT SHIPPERS AFTER POP: - Id:4,CompanyName:Trucks R Us,ShipperType:Automobiles,DateCreated:2010-01-31T11:53:37.8539401Z,UniqueRef:67d7d4947ebc4b0ba5c4d42f5d903bec - Id:1,CompanyName:Trains R Us,ShipperType:Trains,DateCreated:2010-01-31T11:53:37.7169323Z,UniqueRef:d17c5db0415b44b2ac5da7b6ebd780f5 - - DELETING CURRENT AND PROSPECTIVE SHIPPERS: - */ - -More examples are available in the [RedisExamples Redis examples page] and in the comprehensive -[test suite](https://github.com/ServiceStack/ServiceStack.Redis/tree/master/tests/ServiceStack.Redis.Tests) - - -## Speed -One of the best things about Redis is the speed - it is quick. - -[This example](https://github.com/ServiceStack/ServiceStack.Redis/blob/master/tests/ServiceStack.Redis.Tests/RedisClientTests.cs) -below stores and gets the entire [Northwind database](http://code.google.com/p/servicestack/source/browse/trunk/Common/Northwind.Benchmarks/Northwind.Common/DataModel/NorthwindData.cs) (3202 records) in less *1.2 secs* - we've never had it so quick! 
- -_(Running inside a VS.NET/R# unit test on a 3 year old iMac)_ - - using (var client = new RedisClient()) - { - var before = DateTime.Now; - client.StoreAll(NorthwindData.Categories); - client.StoreAll(NorthwindData.Customers); - client.StoreAll(NorthwindData.Employees); - client.StoreAll(NorthwindData.Shippers); - client.StoreAll(NorthwindData.Orders); - client.StoreAll(NorthwindData.Products); - client.StoreAll(NorthwindData.OrderDetails); - client.StoreAll(NorthwindData.CustomerCustomerDemos); - client.StoreAll(NorthwindData.Regions); - client.StoreAll(NorthwindData.Territories); - client.StoreAll(NorthwindData.EmployeeTerritories); - - Console.WriteLine("Took {0}ms to store the entire Northwind database ({1} records)", - (DateTime.Now - before).TotalMilliseconds, totalRecords); - - - before = DateTime.Now; - var categories = client.GetAll(); - var customers = client.GetAll(); - var employees = client.GetAll(); - var shippers = client.GetAll(); - var orders = client.GetAll(); - var products = client.GetAll(); - var orderDetails = client.GetAll(); - var customerCustomerDemos = client.GetAll(); - var regions = client.GetAll(); - var territories = client.GetAll(); - var employeeTerritories = client.GetAll(); - - Console.WriteLine("Took {0}ms to get the entire Northwind database ({1} records)", - (DateTime.Now - before).TotalMilliseconds, totalRecords); - } - /* - == EXAMPLE OUTPUT == - - Took 1020.0583ms to store the entire Northwind database (3202 records) - Took 132.0076ms to get the entire Northwind database (3202 records) - */ - - -Note: The total time taken includes an extra Redis operation for each record to store the id in a Redis set for each -type as well as serializing and de-serializing each record using Service Stack's TypeSerializer. - - -# Community Resources - - - [Redis and VB.Net](http://blogs.lessthandot.com/index.php/DataMgmt/DBProgramming/redis-and-vb-net) by [@chrissie1](https://twitter.com/chrissie1) - - [Using ServiceStack.Redis Part 2: Sets and Hashes](http://michaelsarchet.com/using-the-servicestack-redis-client/) by [@msarchet](http://twitter.com/msarchet) - - [Using the ServiceStack.Redis Client](http://michaelsarchet.com/using-the-servicestack-redis-client/) by [@msarchet](http://twitter.com/msarchet) - - [Implementing ServiceStack.Redis.RedisClient (.NET Client for Redis)](http://www.narizwallace.com/2012/10/implementing-servicestack-redis-redisclient-net-client-for-redis/) by [@NarizWallace](https://twitter.com/NarizWallace) - - [Getting started with Redis in ASP.NET under Windows](http://maxivak.com/getting-started-with-redis-and-asp-net-mvc-under-windows/) by [@maxivak](https://twitter.com/maxivak) - - [Using Redis on Windows with ServiceStack](http://www.clippersoft.net/using-redis-on-windows-with-servicestack/) +### This repository [has moved](https://docs.servicestack.net/mono-repo) to [github.com/ServiceStack/ServiceStack/ServiceStack.Redis](https://github.com/ServiceStack/ServiceStack/tree/main/ServiceStack.Redis) diff --git a/build/Build.proj b/build/Build.proj deleted file mode 100644 index 0c635e20..00000000 --- a/build/Build.proj +++ /dev/null @@ -1,81 +0,0 @@ - - - - - - 3 - 9 - $(BUILD_NUMBER) - - - - $(MSBuildProjectDirectory)/.. 
- $(BuildSolutionDir)/src - Release - $(BuildSolutionDir)/src/.nuget/nuget.exe - $(BuildSolutionDir)/NuGet/ - $(MSBuildProjectDirectory) - $(BuildSolutionDir)/NuGet/lib - $(BuildSolutionDir)/NuGet/stackexpress.redis.nuspec - $(MajorVersion).$(MinorVersion).$(PatchVersion).0 - -unstable - $(MajorVersion).$(MinorVersion).$(PatchVersion)$(UnstableTag) - - - - - - - - - - - BeforeBuildSolutions; - BuildSolutions - - - - - - - - - - - - - - - - - - - - - - - - - - - - \d+\.\d+\.\d+\.\d+ - $(Version) - - - - - - - - - - - - - \ No newline at end of file diff --git a/build/NuGet.exe b/build/NuGet.exe deleted file mode 100644 index 4300c9ff..00000000 Binary files a/build/NuGet.exe and /dev/null differ diff --git a/build/NuGetPack.cmd b/build/NuGetPack.cmd deleted file mode 100644 index 4db9c785..00000000 --- a/build/NuGetPack.cmd +++ /dev/null @@ -1 +0,0 @@ -nuget pack ..\NuGet\servicestack.redis.nuspec -symbols \ No newline at end of file diff --git a/build/build-core.proj b/build/build-core.proj new file mode 100644 index 00000000..3ac34b8b --- /dev/null +++ b/build/build-core.proj @@ -0,0 +1,80 @@ + + + + + + 6 + 0 + $(BUILD_NUMBER) + + + + $(MSBuildProjectDirectory)/.. + $(BuildSolutionDir)/src + $(BuildSolutionDir)/tests + Release + $(BuildSolutionDir)/NuGet/ + $(MajorVersion).$(MinorVersion).$(PatchVersion) + + + + + BeforeBuildSolutions; + BuildSolutions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + <Version>[^<]* + <Version>$(PackageVersion) + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/build/build.bat b/build/build.bat index 2726555f..c51cb92d 100644 --- a/build/build.bat +++ b/build/build.bat @@ -1,13 +1,3 @@ -REM SET BUILD=Debug -SET BUILD=Release - -COPY ..\src\ServiceStack.Redis\bin\%BUILD%\ServiceStack.Redis.* ..\NuGet\lib\net35 - -COPY ..\src\ServiceStack.Redis\bin\%BUILD%\ServiceStack.* ..\..\ServiceStack\release\latest\ServiceStack.Redis - -COPY ..\src\ServiceStack.Redis\bin\%BUILD%\ServiceStack.* ..\..\ServiceStack\lib -COPY ..\src\ServiceStack.Redis\bin\%BUILD%\ServiceStack.* ..\..\ServiceStack.Examples\lib -COPY ..\src\ServiceStack.Redis\bin\%BUILD%\ServiceStack.Redis.* ..\..\ServiceStack.Contrib\lib -COPY ..\src\ServiceStack.Redis\bin\%BUILD%\ServiceStack.Redis.* ..\..\ServiceStack.RedisWebServices\lib - +SET MSBUILD="C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe" +%MSBUILD% build.proj /property:Configuration=Release;MinorVersion=4;PatchVersion=1 diff --git a/build/build.proj b/build/build.proj new file mode 100644 index 00000000..7899a808 --- /dev/null +++ b/build/build.proj @@ -0,0 +1,79 @@ + + + + + + 6 + 0 + $(BUILD_NUMBER) + + + + $(MSBuildProjectDirectory)/.. 
+ $(BuildSolutionDir)/src + $(BuildSolutionDir)/tests + Release + $(BuildSolutionDir)/NuGet/ + $(MajorVersion).$(MinorVersion).$(PatchVersion) + + + + + BeforeBuildSolutions; + BuildSolutions + + + + + + + + + + + + + + + + + + + + + + + + + + + + <Version>[^<]* + <Version>$(PackageVersion) + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/build/Build.tasks b/build/build.tasks similarity index 100% rename from build/Build.tasks rename to build/build.tasks diff --git a/build/release/MonoDevelop/ServiceStack.Redis.zip b/build/release/MonoDevelop/ServiceStack.Redis.zip deleted file mode 100644 index 476d6fac..00000000 Binary files a/build/release/MonoDevelop/ServiceStack.Redis.zip and /dev/null differ diff --git a/docs/pages/images/1-CreateProject.png b/docs/pages/images/1-CreateProject.png new file mode 100644 index 00000000..bc0617a5 Binary files /dev/null and b/docs/pages/images/1-CreateProject.png differ diff --git a/docs/pages/images/10-projectjson.png b/docs/pages/images/10-projectjson.png new file mode 100644 index 00000000..ec38bcd5 Binary files /dev/null and b/docs/pages/images/10-projectjson.png differ diff --git a/docs/pages/images/11-dotnetrestore.png b/docs/pages/images/11-dotnetrestore.png new file mode 100644 index 00000000..44a93985 Binary files /dev/null and b/docs/pages/images/11-dotnetrestore.png differ diff --git a/docs/pages/images/12-dotnetrun.png b/docs/pages/images/12-dotnetrun.png new file mode 100644 index 00000000..7040a5b6 Binary files /dev/null and b/docs/pages/images/12-dotnetrun.png differ diff --git a/docs/pages/images/2-Solution.png b/docs/pages/images/2-Solution.png new file mode 100644 index 00000000..e81dc372 Binary files /dev/null and b/docs/pages/images/2-Solution.png differ diff --git a/docs/pages/images/3-AddNugetPackage.png b/docs/pages/images/3-AddNugetPackage.png new file mode 100644 index 00000000..f0ae3796 Binary files /dev/null and b/docs/pages/images/3-AddNugetPackage.png differ diff --git a/docs/pages/images/4-SearchNugetPackage.png b/docs/pages/images/4-SearchNugetPackage.png new file mode 100644 index 00000000..f697a2dd Binary files /dev/null and b/docs/pages/images/4-SearchNugetPackage.png differ diff --git a/docs/pages/images/5-InstallNugetPackage.png b/docs/pages/images/5-InstallNugetPackage.png new file mode 100644 index 00000000..c421ae49 Binary files /dev/null and b/docs/pages/images/5-InstallNugetPackage.png differ diff --git a/docs/pages/images/6-AcceptLicence.png b/docs/pages/images/6-AcceptLicence.png new file mode 100644 index 00000000..695367f3 Binary files /dev/null and b/docs/pages/images/6-AcceptLicence.png differ diff --git a/docs/pages/images/7-Program.png b/docs/pages/images/7-Program.png new file mode 100644 index 00000000..a3e6dd18 Binary files /dev/null and b/docs/pages/images/7-Program.png differ diff --git a/docs/pages/images/8-Output.png b/docs/pages/images/8-Output.png new file mode 100644 index 00000000..9c4b8838 Binary files /dev/null and b/docs/pages/images/8-Output.png differ diff --git a/docs/pages/images/9-dotnetnew.png b/docs/pages/images/9-dotnetnew.png new file mode 100644 index 00000000..8008f522 Binary files /dev/null and b/docs/pages/images/9-dotnetnew.png differ diff --git a/docs/pages/netcore.md b/docs/pages/netcore.md new file mode 100644 index 00000000..85fd5aa3 --- /dev/null +++ b/docs/pages/netcore.md @@ -0,0 +1,152 @@ +# Using ServiceStack.Redis on .NET Core + +This guide shows how to create and execute .NET Core console application which +uses ServiceStack.Redis 
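+
+At a glance, a finished console app looks something like the sketch below. This is an illustrative preview rather than part of the walkthrough: the `Customer` POCO (and its `Id` property, which the typed client uses as the key) is a made-up example, while `RedisManagerPool` and `Set`/`Get<string>` are the same calls the steps below use; `As<T>`, `Store` and `GetById` simply show the typed client on top of the same connection.
+
+```csharp
+using System;
+using ServiceStack.Redis;
+
+namespace ssredis
+{
+    // Illustrative POCO; the typed client keys entities by their Id property
+    public class Customer
+    {
+        public long Id { get; set; }
+        public string Name { get; set; }
+    }
+
+    public class Program
+    {
+        public static void Main(string[] args)
+        {
+            // Pooled client manager pointing at a single Redis instance
+            var manager = new RedisManagerPool("localhost:6379");
+            using (var client = manager.GetClient())
+            {
+                // Plain key/value usage (what the walkthrough below builds)
+                client.Set("foo", "bar");
+                Console.WriteLine("foo={0}", client.Get<string>("foo"));
+
+                // Typed client usage over the same connection
+                var customers = client.As<Customer>();
+                customers.Store(new Customer { Id = 1, Name = "Acme" });
+                Console.WriteLine(customers.GetById(1).Name);
+            }
+        }
+    }
+}
+```
+
+The rest of this page walks through the same minimal app twice: first from Visual Studio on Windows, then from the command line on Linux.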
+
+### Install .NET Core
+
+First, install [Visual Studio 2015 update 3](https://go.microsoft.com/fwlink/?LinkId=691129) and [.NET Core 1.0.0 - VS 2015 Tooling Preview](https://go.microsoft.com/fwlink/?LinkId=817245).
+
+For more details about Visual Studio 2015 update 3 and the .NET Core installation,
+see the [.NET Core](https://www.microsoft.com/net/core#windows) site.
+
+### Create .NET Core Application
+
+In Visual Studio click `File -> New -> Project` and select **.NET Core/Console Application (.NET Core)**
+from the VS templates.
+
+![Create .NET Core Project](images/1-CreateProject.png)
+
+You will get the following structure in Solution Explorer.
+
+![Solution Explorer](images/2-Solution.png)
+
+Right-click the project and select **Manage NuGet Packages**.
+
+![Add Reference](images/3-AddNugetPackage.png)
+
+In the window that opens, click the **Browse** tab, type `ServiceStack.Redis.Core` and hit **Enter**.
+
+![Add Reference](images/4-SearchNugetPackage.png)
+
+Select the package and click the **Install** button in the right pane.
+
+![Add Reference](images/5-InstallNugetPackage.png)
+
+Visual Studio asks you to read and accept the licence; click the **Accept** button.
+
+![Solution Explorer](images/6-AcceptLicence.png)
+
+Then open the `Program.cs` file and write the following code. Change the address `localhost:6379` to the
+`host:port` where your Redis server is located.
+
+```csharp
+using System;
+using ServiceStack.Redis;
+
+namespace ssredis
+{
+    public class Program
+    {
+        public static void Main(string[] args)
+        {
+            var manager = new RedisManagerPool("localhost:6379");
+            using (var client = manager.GetClient())
+            {
+                client.Set("foo", "bar");
+                Console.WriteLine("foo={0}", client.Get<string>("foo"));
+            }
+        }
+    }
+}
+```
+
+Then hit the **Run** button **(F5)**. You should get the following output:
+
+![Output](images/8-Output.png)
+
+## Run ServiceStack.Redis on Linux
+
+### Install .NET Core
+
+Suppose you have Ubuntu 16.04 installed (for installation instructions for other operating systems,
+visit the [.NET Core site](https://www.microsoft.com/net/core)). Run these commands in the console:
+
+    sudo sh -c 'echo "deb [arch=amd64] https://apt-mo.trafficmanager.net/repos/dotnet-release/ xenial main" > /etc/apt/sources.list.d/dotnetdev.list'
+    sudo apt-key adv --keyserver apt-mo.trafficmanager.net --recv-keys 417A0893
+    sudo apt-get update
+    sudo apt-get install curl dotnet-dev-1.0.0-preview2-003121
+
+### Create Project
+
+    dotnet new
+
+This command creates `project.json` and `Program.cs` files in the current folder.
+
+![dotnet new](images/9-dotnetnew.png)
+
+Open `project.json` for editing:
+
+    nano project.json
+
+and edit the empty `dependencies: {}` section to add the `ServiceStack.Redis.Core` reference:
+
+    dependencies: {
+      "ServiceStack.Redis.Core" : "1.0.2"
+    }
+
+![dotnet new](images/10-projectjson.png)
+
+Type `Ctrl+X` to exit and answer 'yes' to save the changes.
+
+Then open and edit the `Program.cs` file: type `nano Program.cs` and copy the code below
+(change `localhost:6379` to the `host:port` where your Redis server is located):
+ +```csharp +using System; +using ServiceStack.Redis; + +namespace ssredis +{ + public class Program + { + public static void Main(string[] args) + { + var manager = new RedisManagerPool("localhost:6379"); + using (var client = manager.GetClient()) + { + client.Set("foo", "bar"); + Console.WriteLine("foo={0}", client.Get("foo")); + } + } + } +} +``` +### Run the Project + +In console type: + + dotnet restore + dotnet run + +The first command restores packages (note that packages version may vary from the version on screenshot) and second compiles and runs the application + and outputs value of `foo` key to console. + +![dotnet new](images/11-dotnetrestore.png) + +![dotnet new](images/12-dotnetrun.png) + +## Limitations + +`ServiceStack.Redis.Core` is implemented to support [.NETStandard 1.3](https://github.com/dotnet/corefx/blob/master/Documentation/architecture/net-platform-standard.md) interface. +Due to .NET Core does not support `BinaryFormatter` and `Remoting`, `ServiceStack.Redis.Core` does not support classes in `ServiceStack.Redis.Support` namespace + + - RedisChronologicalWorkQueue + - RedisSequentionalWorkQueue + - RedisSimpleWorkQueue + - SerializingRedisClient + - ObjectSerializer + - OptimizedObjectSerializer + - Locking.DistributedLock + - Diagnostic.TrackingRedisClientProxy + - Diagnostic.TrackingRedisClientManager diff --git a/lib/ServiceStack.Common.dll b/lib/ServiceStack.Common.dll deleted file mode 100755 index a786b2d9..00000000 Binary files a/lib/ServiceStack.Common.dll and /dev/null differ diff --git a/lib/ServiceStack.Interfaces.dll b/lib/ServiceStack.Interfaces.dll deleted file mode 100755 index 5ae52deb..00000000 Binary files a/lib/ServiceStack.Interfaces.dll and /dev/null differ diff --git a/lib/ServiceStack.Text.XML b/lib/ServiceStack.Text.XML deleted file mode 100644 index 8a76bc67..00000000 --- a/lib/ServiceStack.Text.XML +++ /dev/null @@ -1,605 +0,0 @@ - - - - ServiceStack.Text - - - - - Utils to load types - - - - - Find the type from the name supplied - - [typeName] or [typeName, assemblyName] - - - - - The top-most interface of the given type, if any. - - - - - Find type if it exists - - - - The type if it exists - - - - If AlwaysUseUtc is set to true then convert all DateTime to UTC. - - - - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - - Get the type(string) constructor if exists - - The type. - - - - - micro optimizations: using flags instead of value.IndexOfAny(EscapeChars) - - - - - - - Class to hold - - - - - - A fast, standards-based, serialization-issue free DateTime serailizer. - - - - - A hashset implementation that uses an IDictionary - - - - - Determines whether this serializer can create the specified type from a string. - - The type. - - true if this instance [can create from string] the specified type; otherwise, false. - - - - - Parses the specified value. - - The value. - - - - - Deserializes from reader. - - The reader. - - - - - Serializes to string. - - The value. - - - - - Serializes to writer. - - The value. - The writer. - - - - Sets which format to use when serializing TimeSpans - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. 
- - - - - if the is configured - to support web-friendly serialized formats, ie emitting lowercase_underscore_casing for JSON - - - - - Define how property names are mapped during deserialization - - - - - Gets or sets a value indicating if the framework should throw serialization exceptions - or continue regardless of deserialization errors. If the framework - will throw; otherwise, it will parse as many fields as possible. The default is . - - - - - Gets or sets a value indicating if the framework should always convert to UTC format instead of local time. - - - - - Gets or sets a value indicating if unicode symbols should be serialized as "\uXXXX". - - - - - If set to true, Interface types will be prefered over concrete types when serializing. - - - - - Set this to enable your own type construction provider. - This is helpful for integration with IoC containers where you need to call the container constructor. - Return null if you don't know how to construct the type and the parameterless constructor will be used. - - - - - If set to true, Interface types will be prefered over concrete types when serializing. - - - - - Always emit type info for this type. Takes precedence over ExcludeTypeInfo - - - - - Never emit type info for this type - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. - - - - - Define custom serialization fn for BCL Structs - - - - - Define custom raw serialization fn - - - - - Define custom serialization hook - - - - - Define custom deserialization fn for BCL Structs - - - - - Define custom raw deserialization fn for objects - - - - - Exclude specific properties of this type from being serialized - - - - - Opt-in flag to set some Value Types to be treated as a Ref Type - - - - - Whether there is a fn (raw or otherwise) - - - - - The property names on target types must match property names in the JSON source - - - - - The property names on target types may not match the property names in the JSON source - - - - - Uses the xsd format like PT15H10M20S - - - - - Uses the standard .net ToString method of the TimeSpan class - - - - - Get JSON string value converted to T - - - - - Get JSON string value - - - - - Get unescaped string value - - - - - Get unescaped string value - - - - - Write JSON Array, Object, bool or number values as raw string - - - - - Get JSON string value - - - - - Creates an instance of a Type from a string value - - - - - Parses the specified value. - - The value. - - - - - Shortcut escape when we're sure value doesn't contain any escaped chars - - - - - - - Given a character as utf32, returns the equivalent string provided that the character - is legal json. - - - - - - - micro optimizations: using flags instead of value.IndexOfAny(EscapeChars) - - - - - - - Implement the serializer using a more static approach - - - - - - Implement the serializer using a more static approach - - - - - - Pretty Thread-Safe cache class from: - http://code.google.com/p/dapper-dot-net/source/browse/Dapper/SqlMapper.cs - - This is a micro-cache; suitable when the number of terms is controllable (a few hundred, for example), - and strictly append-only; you cannot change existing values. All key matches are on **REFERENCE** - equality. The type is fully thread-safe. 
- - - - - Represents an individual object, allowing access to members by-name - - - - - Use the target types definition of equality - - - - - Obtain the hash of the target object - - - - - Use the target's definition of a string representation - - - - - Wraps an individual object, allowing by-name access to that instance - - - - - Get or Set the value of a named member for the underlying object - - - - - The object represented by this instance - - - - - Provides by-name member-access to objects of a given type - - - - - Create a new instance of this type - - - - - Provides a type-specific accessor, allowing by-name access for all objects of that type - - The accessor is cached internally; a pre-existing accessor may be returned - - - - Does this type support new instances via a parameterless constructor? - - - - - Get or set the value of a named member on the target instance - - - - - Implement the serializer using a more static approach - - - - - - @jonskeet: Collection of utility methods which operate on streams. - r285, February 26th 2009: http://www.yoda.arachsys.com/csharp/miscutil/ - - - - - Reads the given stream up to the end, returning the data as a byte - array. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer size. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer for transferring data. Note that the - current contents of the buffer is ignored, so the buffer needn't - be cleared beforehand. - - - - - Copies all the data from one stream into another. - - - - - Copies all the data from one stream into another, using a buffer - of the given size. - - - - - Copies all the data from one stream into another, using the given - buffer for transferring data. Note that the current contents of - the buffer is ignored, so the buffer needn't be cleared beforehand. - - - - - Reads exactly the given number of bytes from the specified stream. - If the end of the stream is reached before the specified amount - of data is read, an exception is thrown. - - - - - Reads into a buffer, filling it completely. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Same as ReadExactly, but without the argument checks. - - - - - Converts from base: 0 - 62 - - The source. - From. - To. - - - - - Skip the encoding process for 'safe strings' - - - - - - - A class to allow the conversion of doubles to string representations of - their exact decimal values. The implementation aims for readability over - efficiency. - - Courtesy of @JonSkeet - http://www.yoda.arachsys.com/csharp/DoubleConverter.cs - - - - - - - - How many digits are *after* the decimal point - - - - - Constructs an arbitrary decimal expansion from the given long. - The long must not be negative. - - - - - Multiplies the current expansion by the given amount, which should - only be 2 or 5. - - - - - Shifts the decimal point; a negative value makes - the decimal expansion bigger (as fewer digits come after the - decimal place) and a positive value makes the decimal - expansion smaller. - - - - - Removes leading/trailing zeroes from the expansion. - - - - - Converts the value to a proper decimal string representation. 
- - - - - Creates an instance of a Type from a string value - - - - - Determines whether the specified type is convertible from string. - - The type. - - true if the specified type is convertible from string; otherwise, false. - - - - - Parses the specified value. - - The value. - - - - - Parses the specified type. - - The type. - The value. - - - - - Useful extension method to get the Dictionary[string,string] representation of any POCO type. - - - - - - Recursively prints the contents of any POCO object in a human-friendly, readable format - - - - - - Print Dump to Console.WriteLine - - - - - Print string.Format to Console.WriteLine - - - - - Parses the specified value. - - The value. - - - - diff --git a/lib/ServiceStack.Text.dll b/lib/ServiceStack.Text.dll deleted file mode 100755 index 678ad79d..00000000 Binary files a/lib/ServiceStack.Text.dll and /dev/null differ diff --git a/lib/tests/Moq.dll b/lib/tests/Moq.dll deleted file mode 100644 index abcb72ee..00000000 Binary files a/lib/tests/Moq.dll and /dev/null differ diff --git a/lib/tests/Moq.pdb b/lib/tests/Moq.pdb deleted file mode 100644 index 034ab186..00000000 Binary files a/lib/tests/Moq.pdb and /dev/null differ diff --git a/lib/tests/Northwind.Common.dll b/lib/tests/Northwind.Common.dll deleted file mode 100644 index 972e8930..00000000 Binary files a/lib/tests/Northwind.Common.dll and /dev/null differ diff --git a/lib/tests/Northwind.Common.pdb b/lib/tests/Northwind.Common.pdb deleted file mode 100644 index dfc5120c..00000000 Binary files a/lib/tests/Northwind.Common.pdb and /dev/null differ diff --git a/lib/tests/RazorEngine.dll b/lib/tests/RazorEngine.dll deleted file mode 100644 index 2b26c7d3..00000000 Binary files a/lib/tests/RazorEngine.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.CacheAccess.Providers.dll b/lib/tests/ServiceStack.CacheAccess.Providers.dll deleted file mode 100644 index 02529f0c..00000000 Binary files a/lib/tests/ServiceStack.CacheAccess.Providers.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.CacheAccess.Providers.pdb b/lib/tests/ServiceStack.CacheAccess.Providers.pdb deleted file mode 100644 index 204cfdec..00000000 Binary files a/lib/tests/ServiceStack.CacheAccess.Providers.pdb and /dev/null differ diff --git a/lib/tests/ServiceStack.Common.Tests.dll b/lib/tests/ServiceStack.Common.Tests.dll deleted file mode 100644 index f5dc6443..00000000 Binary files a/lib/tests/ServiceStack.Common.Tests.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Common.Tests.dll.config b/lib/tests/ServiceStack.Common.Tests.dll.config deleted file mode 100644 index e1ab4308..00000000 --- a/lib/tests/ServiceStack.Common.Tests.dll.config +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/lib/tests/ServiceStack.Common.Tests.pdb b/lib/tests/ServiceStack.Common.Tests.pdb deleted file mode 100644 index 67da230e..00000000 Binary files a/lib/tests/ServiceStack.Common.Tests.pdb and /dev/null differ diff --git a/lib/tests/ServiceStack.Common.dll b/lib/tests/ServiceStack.Common.dll deleted file mode 100644 index a98d9bcf..00000000 Binary files a/lib/tests/ServiceStack.Common.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Interfaces.dll b/lib/tests/ServiceStack.Interfaces.dll deleted file mode 100644 index 87a54d28..00000000 Binary files a/lib/tests/ServiceStack.Interfaces.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Messaging.Tests.dll b/lib/tests/ServiceStack.Messaging.Tests.dll deleted file mode 100644 index 3f5b8821..00000000 
Binary files a/lib/tests/ServiceStack.Messaging.Tests.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Messaging.Tests.pdb b/lib/tests/ServiceStack.Messaging.Tests.pdb deleted file mode 100644 index 0f4eb48b..00000000 Binary files a/lib/tests/ServiceStack.Messaging.Tests.pdb and /dev/null differ diff --git a/lib/tests/ServiceStack.Redis.dll b/lib/tests/ServiceStack.Redis.dll deleted file mode 100644 index 8b75a362..00000000 Binary files a/lib/tests/ServiceStack.Redis.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.ServiceInterface.dll b/lib/tests/ServiceStack.ServiceInterface.dll deleted file mode 100644 index 3fbe4ea4..00000000 Binary files a/lib/tests/ServiceStack.ServiceInterface.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.ServiceInterface.pdb b/lib/tests/ServiceStack.ServiceInterface.pdb deleted file mode 100644 index 92c893ee..00000000 Binary files a/lib/tests/ServiceStack.ServiceInterface.pdb and /dev/null differ diff --git a/lib/tests/ServiceStack.Text.dll b/lib/tests/ServiceStack.Text.dll deleted file mode 100644 index 7cda594d..00000000 Binary files a/lib/tests/ServiceStack.Text.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Text.xml b/lib/tests/ServiceStack.Text.xml deleted file mode 100644 index 9d9f880a..00000000 --- a/lib/tests/ServiceStack.Text.xml +++ /dev/null @@ -1,474 +0,0 @@ - - - - ServiceStack.Text - - - - - Determines whether this serializer can create the specified type from a string. - - The type. - - true if this instance [can create from string] the specified type; otherwise, false. - - - - - Parses the specified value. - - The value. - - - - - Deserializes from reader. - - The reader. - - - - - Serializes to string. - - The value. - - - - - Serializes to writer. - - The value. - The writer. - - - - Parses the specified value. - - The value. - - - - - A class to allow the conversion of doubles to string representations of - their exact decimal values. The implementation aims for readability over - efficiency. - - Courtesy of @JonSkeet - http://www.yoda.arachsys.com/csharp/DoubleConverter.cs - - - - - - - - How many digits are *after* the decimal point - - - - - Constructs an arbitrary decimal expansion from the given long. - The long must not be negative. - - - - - Multiplies the current expansion by the given amount, which should - only be 2 or 5. - - - - - Shifts the decimal point; a negative value makes - the decimal expansion bigger (as fewer digits come after the - decimal place) and a positive value makes the decimal - expansion smaller. - - - - - Removes leading/trailing zeroes from the expansion. - - - - - Converts the value to a proper decimal string representation. - - - - - Implement the serializer using a more static approach - - - - - - Pretty Thread-Safe cache class from: - http://code.google.com/p/dapper-dot-net/source/browse/Dapper/SqlMapper.cs - - This is a micro-cache; suitable when the number of terms is controllable (a few hundred, for example), - and strictly append-only; you cannot change existing values. All key matches are on **REFERENCE** - equality. The type is fully thread-safe. - - - - - A fast, standards-based, serialization-issue free DateTime serailizer. - - - - - Creates an instance of a Type from a string value - - - - - micro optimizations: using flags instead of value.IndexOfAny(EscapeChars) - - - - - - - Get the type(string) constructor if exists - - The type. 
- - - - - Implement the serializer using a more static approach - - - - - - Represents an individual object, allowing access to members by-name - - - - - Use the target types definition of equality - - - - - Obtain the hash of the target object - - - - - Use the target's definition of a string representation - - - - - Wraps an individual object, allowing by-name access to that instance - - - - - Get or Set the value of a named member for the underlying object - - - - - The object represented by this instance - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. - - - - - Gets or sets a value indicating if the framework should throw serialization exceptions - or continue regardless of deserialization errors. If the framework - will throw; otherwise, it will parse as many fields as possible. The default is . - - - - - Never emit type info for this type - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. - - - - - Define custom serialization fn for BCL Structs - - - - - Define custom deserialization fn for BCL Structs - - - - - Exclude specific properties of this type from being serialized - - - - - Opt-in flag to set some Value Types to be treated as a Ref Type - - - - - Creates an instance of a Type from a string value - - - - - Determines whether the specified type is convertible from string. - - The type. - - true if the specified type is convertible from string; otherwise, false. - - - - - Parses the specified value. - - The value. - - - - - Parses the specified type. - - The type. - The value. - - - - - Useful extension method to get the Dictionary[string,string] representation of any POCO type. - - - - - - Recursively prints the contents of any POCO object in a human-friendly, readable format - - - - - - Print Dump to Console.WriteLine - - - - - Print string.Format to Console.WriteLine - - - - - A hashset implementation that uses an IDictionary - - - - - Shortcut escape when we're sure value doesn't contain any escaped chars - - - - - - - Since Silverlight doesn't have char.ConvertFromUtf32() so putting Mono's implemenation inline. - - - - - - - Class to hold - - - - - - @jonskeet: Collection of utility methods which operate on streams. - r285, February 26th 2009: http://www.yoda.arachsys.com/csharp/miscutil/ - - - - - Reads the given stream up to the end, returning the data as a byte - array. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer size. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer for transferring data. Note that the - current contents of the buffer is ignored, so the buffer needn't - be cleared beforehand. - - - - - Copies all the data from one stream into another. - - - - - Copies all the data from one stream into another, using a buffer - of the given size. - - - - - Copies all the data from one stream into another, using the given - buffer for transferring data. Note that the current contents of - the buffer is ignored, so the buffer needn't be cleared beforehand. - - - - - Reads exactly the given number of bytes from the specified stream. 
- If the end of the stream is reached before the specified amount - of data is read, an exception is thrown. - - - - - Reads into a buffer, filling it completely. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Same as ReadExactly, but without the argument checks. - - - - - Utils to load types - - - - - Find the type from the name supplied - - [typeName] or [typeName, assemblyName] - - - - - Find type if it exists - - - - The type if it exists - - - - Converts from base: 0 - 62 - - The source. - From. - To. - - - - - Skip the encoding process for 'safe strings' - - - - - - - Provides by-name member-access to objects of a given type - - - - - Create a new instance of this type - - - - - Provides a type-specific accessor, allowing by-name access for all objects of that type - - The accessor is cached internally; a pre-existing accessor may be returned - - - - Does this type support new instances via a parameterless constructor? - - - - - Get or set the value of a named member on the target instance - - - - - Implement the serializer using a more static approach - - - - - - micro optimizations: using flags instead of value.IndexOfAny(EscapeChars) - - - - - - - Parses the specified value. - - The value. - - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - diff --git a/lib/tests/ServiceStack.dll b/lib/tests/ServiceStack.dll deleted file mode 100644 index 01485bdc..00000000 Binary files a/lib/tests/ServiceStack.dll and /dev/null differ diff --git a/lib/tests/nunit-console-runner.dll b/lib/tests/nunit-console-runner.dll deleted file mode 100644 index 3f9c0e93..00000000 Binary files a/lib/tests/nunit-console-runner.dll and /dev/null differ diff --git a/lib/tests/nunit-console-x86.exe b/lib/tests/nunit-console-x86.exe deleted file mode 100644 index 83849f3a..00000000 Binary files a/lib/tests/nunit-console-x86.exe and /dev/null differ diff --git a/lib/tests/nunit-console-x86.exe.config b/lib/tests/nunit-console-x86.exe.config deleted file mode 100644 index 2d786075..00000000 --- a/lib/tests/nunit-console-x86.exe.config +++ /dev/null @@ -1,87 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/lib/tests/nunit-console.exe b/lib/tests/nunit-console.exe deleted file mode 100644 index 9f619057..00000000 Binary files a/lib/tests/nunit-console.exe and /dev/null differ diff --git a/lib/tests/nunit-console.exe.config b/lib/tests/nunit-console.exe.config deleted file mode 100644 index 2d786075..00000000 --- a/lib/tests/nunit-console.exe.config +++ /dev/null @@ -1,87 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/lib/tests/nunit.core.dll b/lib/tests/nunit.core.dll deleted file mode 100644 index 6fd79cf1..00000000 Binary files a/lib/tests/nunit.core.dll and /dev/null differ diff --git a/lib/tests/nunit.core.interfaces.dll b/lib/tests/nunit.core.interfaces.dll deleted file mode 100644 index 2794827b..00000000 Binary files a/lib/tests/nunit.core.interfaces.dll and /dev/null differ diff --git a/lib/tests/nunit.framework.dll b/lib/tests/nunit.framework.dll deleted file mode 100644 index 
2729ddf2..00000000 Binary files a/lib/tests/nunit.framework.dll and /dev/null differ diff --git a/lib/tests/nunit.framework.xml b/lib/tests/nunit.framework.xml deleted file mode 100644 index 911ebf8b..00000000 --- a/lib/tests/nunit.framework.xml +++ /dev/null @@ -1,5622 +0,0 @@ - - - - nunit.framework - - - - - EmptyStringConstraint tests whether a string is empty. - - - - - EmptyConstraint tests a whether a string or collection is empty, - postponing the decision about which test is applied until the - type of the actual argument is known. - - - - - The Constraint class is the base of all built-in or - user-defined constraints in NUnit. It provides the operator - overloads used to combine constraints. - - - - - Static UnsetObject used to detect derived constraints - failing to set the actual value. - - - - - If true, all string comparisons will ignore case - - - - - If true, strings in error messages will be clipped - - - - - If true, arrays will be treated as collections, allowing - those of different dimensions to be compared - - - - - If non-zero, equality comparisons within the specified - tolerance will succeed. - - - - - IComparer object used in comparisons for some constraints. - - - - - The actual value being tested against a constraint - - - - - Flag the constraint to use a tolerance when determining equality. - Currently only used for doubles and floats. - - Tolerance to be used - Self. - - - - Flag the constraint to use the supplied IComparer object. - - The IComparer object to use. - Self. - - - - Write the failure message to the MessageWriter provided - as an argument. The default implementation simply passes - the constraint and the actual value to the writer, which - then displays the constraint description and the value. - - Constraints that need to provide additional details, - such as where the error occured can override this. - - The MessageWriter on which to display the message - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - Write the actual value for a failing constraint test to a - MessageWriter. The default implementation simply writes - the raw value of actual, leaving it to the writer to - perform any formatting. - - The writer on which the actual value is displayed - - - - This operator creates a constraint that is satisfied only if both - argument constraints are satisfied. - - - - - This operator creates a constraint that is satisfied if either - of the argument constraints is satisfied. - - - - - This operator creates a constraint that is satisfied if the - argument constraint is not satisfied. - - - - - Flag the constraint to ignore case and return self. - - - - - Flag the constraint to suppress string clipping - and return self. - - - - - Flag the constraint to compare arrays as collections - and return self. - - - - - Class used to detect any derived constraints - that fail to set the actual value in their - Matches override. 
- - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - SubstringConstraint can test whether a string contains - the expected substring. - - - - - Initializes a new instance of the class. - - The expected. - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - StartsWithConstraint can test whether a string starts - with an expected substring. - - - - - Initializes a new instance of the class. - - The expected string - - - - Test whether the constraint is matched by the actual value. - This is a template method, which calls the IsMatch method - of the derived class. - - - - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - EndsWithConstraint can test whether a string ends - with an expected substring. - - - - - Initializes a new instance of the class. - - The expected string - - - - Test whether the constraint is matched by the actual value. - This is a template method, which calls the IsMatch method - of the derived class. - - - - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - RegexConstraint can test whether a string matches - the pattern provided. - - - - - Initializes a new instance of the class. - - The pattern. - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - ConstraintBuilder is used to resolve the Not and All properties, - which serve as prefix operators for constraints. With the addition - of an operand stack, And and Or could be supported, but we have - left them out in favor of a simpler, more type-safe implementation. - Use the & and | operator overloads to combine constraints. - - - - - Implicitly convert ConstraintBuilder to an actual Constraint - at the point where the syntax demands it. - - - - - - - Resolves the chain of constraints using an - EqualConstraint as base. - - - - - Resolves the chain of constraints using a - SameAsConstraint as base. - - - - - Resolves the chain of constraints using a - LessThanConstraint as base. - - - - - Resolves the chain of constraints using a - GreaterThanConstraint as base. - - - - - Resolves the chain of constraints using a - LessThanOrEqualConstraint as base. - - - - - Resolves the chain of constraints using a - LessThanOrEqualConstraint as base. - - - - - Resolves the chain of constraints using a - GreaterThanOrEqualConstraint as base. - - - - - Resolves the chain of constraints using a - GreaterThanOrEqualConstraint as base. - - - - - Resolves the chain of constraints using an - ExactTypeConstraint as base. - - - - - Resolves the chain of constraints using an - InstanceOfTypeConstraint as base. 
- - - - - Resolves the chain of constraints using an - AssignableFromConstraint as base. - - - - - Resolves the chain of constraints using a - ContainsConstraint as base. This constraint - will, in turn, make use of the appropriate - second-level constraint, depending on the - type of the actual argument. - - - - - Resolves the chain of constraints using a - CollectionContainsConstraint as base. - - The expected object - - - - Resolves the chain of constraints using a - StartsWithConstraint as base. - - - - - Resolves the chain of constraints using a - StringEndingConstraint as base. - - - - - Resolves the chain of constraints using a - StringMatchingConstraint as base. - - - - - Resolves the chain of constraints using a - CollectionEquivalentConstraint as base. - - - - - Resolves the chain of constraints using a - CollectionContainingConstraint as base. - - - - - Resolves the chain of constraints using a - CollectionSubsetConstraint as base. - - - - - Resolves the chain of constraints using a - PropertyConstraint as base - - - - - Resolves the chain of constraints using a - PropertyCOnstraint on Length as base - - - - - - - Resolves the chain of constraints using a - PropertyCOnstraint on Length as base - - - - - - - Modifies the ConstraintBuilder by pushing a Prop operator on the - ops stack and the name of the property on the opnds stack. - - - - - - - Resolve a constraint that has been recognized by applying - any pending operators and returning the resulting Constraint. - - A constraint that incorporates all pending operators - - - - Resolves the chain of constraints using - EqualConstraint(null) as base. - - - - - Resolves the chain of constraints using - EqualConstraint(true) as base. - - - - - Resolves the chain of constraints using - EqualConstraint(false) as base. - - - - - Resolves the chain of constraints using - Is.NaN as base. - - - - - Resolves the chain of constraints using - Is.Empty as base. - - - - - Resolves the chain of constraints using - Is.Unique as base. - - - - - Modifies the ConstraintBuilder by pushing a Not operator on the stack. - - - - - Modifies the ConstraintBuilder by pushing a Not operator on the stack. - - - - - Modifies the ConstraintBuilder by pushing an All operator on the stack. - - - - - Modifies the ConstraintBuilder by pushing a Some operator on the stack. - - - - - Modifies the constraint builder by pushing All and Not operators on the stack - - - - - CollectionConstraint is the abstract base class for - constraints that operate on collections. - - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Protected method to be implemented by derived classes - - - - - - - CollectionTally counts (tallies) the number of - occurences of each object in one or more enuerations. - - - - - Construct a CollectionTally object from a collection - - - - - - Remove the counts for a collection from the tally, - so long as their are sufficient items to remove. - The tallies are not permitted to become negative. - - The collection to remove - True if there were enough items to remove, otherwise false - - - - Test whether all the counts are equal to a given value - - The value to be looked for - True if all counts are equal to the value, otherwise false - - - - Get the count of the number of times an object is present in the tally - - - - - EmptyCollectionConstraint tests whether a colletion is empty. 
- - - - - Check that the collection is empty - - - - - - - Write the constraint description to a MessageWriter - - - - - - UniqueItemsConstraint tests whether all the items in a - collection are unique. - - - - - Check that all items are unique. - - - - - - - Write a description of this constraint to a MessageWriter - - - - - - CollectionContainsConstraint is used to test whether a collection - contains an expected object as a member. - - - - - Construct a CollectionContainsConstraint - - - - - - Test whether the expected item is contained in the collection - - - - - - - Write a descripton of the constraint to a MessageWriter - - - - - - CollectionEquivalentCOnstraint is used to determine whether two - collections are equivalent. - - - - - Construct a CollectionEquivalentConstraint - - - - - - Test whether two collections are equivalent - - - - - - - Write a description of this constraint to a MessageWriter - - - - - - CollectionSubsetConstraint is used to determine whether - one collection is a subset of another - - - - - Construct a CollectionSubsetConstraint - - The collection that the actual value is expected to be a subset of - - - - Test whether the actual collection is a subset of - the expected collection provided. - - - - - - - Write a description of this constraint to a MessageWriter - - - - - - EqualConstraint is able to compare an actual value with the - expected value provided in its constructor. - - - - - Initializes a new instance of the class. - - The expected value. - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write a failure message. Overridden to provide custom - failure messages for EqualConstraint. - - The MessageWriter to write to - - - - Write description of this constraint - - The MessageWriter to write to - - - - Helper method to compare two arrays - - - - - Method to compare two DirectoryInfo objects - - first directory to compare - second directory to compare - true if equivalent, false if not - - - - Display the failure information for two collections that did not match. - - The MessageWriter on which to display - The expected collection. - The actual collection - The depth of this failure in a set of nested collections - - - - Displays a single line showing the types and sizes of the expected - and actual collections or arrays. If both are identical, the value is - only shown once. - - The MessageWriter on which to display - The expected collection or array - The actual collection or array - The indentation level for the message line - - - - Displays a single line showing the point in the expected and actual - arrays at which the comparison failed. If the arrays have different - structures or dimensions, both values are shown. - - The MessageWriter on which to display - The expected array - The actual array - Index of the failure point in the underlying collections - The indentation level for the message line - - - - Abstract base class used for prefixes - - - - - The base constraint - - - - - Construct given a base constraint - - - - - - Set all modifiers applied to the prefix into - the base constraint before matching - - - - - NotConstraint negates the effect of some other constraint - - - - - Initializes a new instance of the class. - - The base constraint to be negated. 
- - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for if the base constraint fails, false if it succeeds - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - Write the actual value for a failing constraint test to a MessageWriter. - - The writer on which the actual value is displayed - - - - AllItemsConstraint applies another constraint to each - item in a collection, succeeding if they all succeed. - - - - - Construct an AllItemsConstraint on top of an existing constraint - - - - - - Apply the item constraint to each item in the collection, - failing if any item fails. - - - - - - - Write a description of this constraint to a MessageWriter - - - - - - SomeItemsConstraint applies another constraint to each - item in a collection, succeeding if any of them succeeds. - - - - - Construct a SomeItemsConstraint on top of an existing constraint - - - - - - Apply the item constraint to each item in the collection, - failing if any item fails. - - - - - - - Write a description of this constraint to a MessageWriter - - - - - - SomeItemsConstraint applies another constraint to each - item in a collection, succeeding if any of them succeeds. - - - - - Construct a SomeItemsConstraint on top of an existing constraint - - - - - - Apply the item constraint to each item in the collection, - failing if any item fails. - - - - - - - Write a description of this constraint to a MessageWriter - - - - - - SameAsConstraint tests whether an object is identical to - the object passed to its constructor - - - - - Initializes a new instance of the class. - - The expected object. - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - TypeConstraint is the abstract base for constraints - that take a Type as their expected value. - - - - - The expected Type used by the constraint - - - - - Construct a TypeConstraint for a given Type - - - - - - Write the actual value for a failing constraint test to a - MessageWriter. TypeCOnstraints override this method to write - the name of the type. - - The writer on which the actual value is displayed - - - - ExactTypeConstraint is used to test that an object - is of the exact type provided in the constructor - - - - - Construct an ExactTypeConstraint for a given Type - - - - - - Test that an object is of the exact type specified - - - - - - - Write the description of this constraint to a MessageWriter - - - - - - InstanceOfTypeConstraint is used to test that an object - is of the same type provided or derived from it. - - - - - Construct an InstanceOfTypeConstraint for the type provided - - - - - - Test whether an object is of the specified type or a derived type - - - - - - - Write a description of this constraint to a MessageWriter - - - - - - AssignableFromConstraint is used to test that an object - can be assigned from a given Type. - - - - - Construct an AssignableFromConstraint for the type provided - - - - - - Test whether an object can be assigned from the specified type - - - - - - - Write a description of this constraint to a MessageWriter - - - - - - Abstract base class for constraints that compare values to - determine if one is greater than, equal to or less than - the other. 
- - - - - The value against which a comparison is to be made - - - - - If true, less than returns success - - - - - if true, equal returns success - - - - - if true, greater than returns success - - - - - The predicate used as a part of the description - - - - - Initializes a new instance of the class. - - The value against which to make a comparison. - if set to true less succeeds. - if set to true equal succeeds. - if set to true greater succeeds. - String used in describing the constraint. - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - Tests whether a value is greater than the value supplied to its constructor - - - - - Initializes a new instance of the class. - - The expected value. - - - - Tests whether a value is greater than or equal to the value supplied to its constructor - - - - - Initializes a new instance of the class. - - The expected value. - - - - Tests whether a value is less than the value supplied to its constructor - - - - - Initializes a new instance of the class. - - The expected value. - - - - Tests whether a value is less than or equal to the value supplied to its constructor - - - - - Initializes a new instance of the class. - - The expected value. - - - - The Numerics class contains common operations on numeric values. - - - - - Checks the type of the object, returning true if - the object is a numeric type. - - The object to check - true if the object is a numeric type - - - - Checks the type of the object, returning true if - the object is a floating point numeric type. - - The object to check - true if the object is a floating point numeric type - - - - Checks the type of the object, returning true if - the object is a fixed point numeric type. - - The object to check - true if the object is a fixed point numeric type - - - - Test two numeric values for equality, performing the usual numeric - conversions and using a provided or default tolerance. If the value - referred to by tolerance is null, this method may set it to a default. - - The expected value - The actual value - A reference to the numeric tolerance in effect - True if the values are equal - - - - Compare two numeric values, performing the usual numeric conversions. - - The expected value - The actual value - - - - - ContainsConstraint tests a whether a string contains a substring - or a collection contains an object. It postpones the decision of - which test to use until the type of the actual argument is known. - This allows testing whether a string is contained in a collection - or as a substring of another string using the same syntax. - - - - - Initializes a new instance of the class. - - The expected. - - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - Summary description for PropertyConstraint. - - - - - Initializes a new instance of the class. - - The name. - The constraint to apply to the property. 
- - - - Test whether the constraint is satisfied by a given value - - The value to be tested - True for success, false for failure - - - - Write the constraint description to a MessageWriter - - The writer on which the description is displayed - - - - Write the actual value for a failing constraint test to a - MessageWriter. The default implementation simply writes - the raw value of actual, leaving it to the writer to - perform any formatting. - - The writer on which the actual value is displayed - - - - BinaryOperation is the abstract base of all constraints - that combine two other constraints in some fashion. - - - - - The first constraint being combined - - - - - The second constraint being combined - - - - - Construct a BinaryOperation from two other constraints - - The first constraint - The second constraint - - - - AndConstraint succeeds only if both members succeed. - - - - - Create an AndConstraint from two other constraints - - The first constraint - The second constraint - - - - Apply both member constraints to an actual value, succeeding - succeeding only if both of them succeed. - - The actual value - True if the constraints both succeeded - - - - Write a description for this contraint to a MessageWriter - - The MessageWriter to receive the description - - - - OrConstraint succeeds if either member succeeds - - - - - Create an OrConstraint from two other constraints - - The first constraint - The second constraint - - - - Apply the member constraints to an actual value, succeeding - succeeding as soon as one of them succeeds. - - The actual value - True if either constraint succeeded - - - - Write a description for this contraint to a MessageWriter - - The MessageWriter to receive the description - - - - The Is class is a helper class with properties and methods - that supply a number of constraints used in Asserts. - - - - - Is.Null returns a static constraint that tests for null - - - - - Is.True returns a static constraint that tests whether a value is true - - - - - Is.False returns a static constraint that tests whether a value is false - - - - - Is.NaN returns a static constraint that tests whether a value is an NaN - - - - - Is.Empty returns a static constraint that tests whether a string or collection is empty - - - - - Is.Unique returns a static constraint that tests whether a collection contains all unque items. - - - - - Is.EqualTo returns a constraint that tests whether the - actual value equals the supplied argument - - - - - - - Is.SameAs returns a constraint that tests whether the - actual value is the same object as the supplied argument. - - - - - - - Is.GreaterThan returns a constraint that tests whether the - actual value is greater than the suppled argument - - - - - Is.GreaterThanOrEqualTo returns a constraint that tests whether the - actual value is greater than or equal to the suppled argument - - - - - Is.AtLeast is a synonym for Is.GreaterThanOrEqualTo - - - - - Is.LessThan returns a constraint that tests whether the - actual value is less than the suppled argument - - - - - Is.LessThanOrEqualTo returns a constraint that tests whether the - actual value is less than or equal to the suppled argument - - - - - Is.AtMost is a synonym for Is.LessThanOrEqualTo - - - - - Is.TypeOf returns a constraint that tests whether the actual - value is of the exact type supplied as an argument. - - - - - Is.InstanceOfType returns a constraint that tests whether - the actual value is of the type supplied as an argument - or a derived type. 
- - - - - Is.AssignableFrom returns a constraint that tests whether - the actual value is assignable from the type supplied as - an argument. - - - - - - - Is.EquivalentTo returns a constraint that tests whether - the actual value is a collection containing the same - elements as the collection supplied as an arument - - - - - Is.SubsetOf returns a constraint that tests whether - the actual value is a subset of the collection - supplied as an arument - - - - - Is.Not returns a ConstraintBuilder that negates - the constraint that follows it. - - - - - Is.All returns a ConstraintBuilder, which will apply - the following constraint to all members of a collection, - succeeding if all of them succeed. This property is - a synonym for Has.AllItems. - - - - - The Iz class is a synonym for Is intended for use in VB, - which regards Is as a keyword. - - - - - The Text class is a helper class with properties and methods - that supply a number of constraints used with strings. - - - - - Contains returns a constraint that succeeds if the actual - value contains the substring supplied as an argument. - - - - - DoesNotContain returns a constraint that fails if the actual - value contains the substring supplied as an argument. - - - - - StartsWith returns a constraint that succeeds if the actual - value starts with the substring supplied as an argument. - - - - - DoesNotStartWith returns a constraint that fails if the actual - value starts with the substring supplied as an argument. - - - - - EndsWith returns a constraint that succeeds if the actual - value ends with the substring supplied as an argument. - - - - - DoesNotEndWith returns a constraint that fails if the actual - value ends with the substring supplied as an argument. - - - - - Matches returns a constraint that succeeds if the actual - value matches the pattern supplied as an argument. - - - - - - - DoesNotMatch returns a constraint that failss if the actual - value matches the pattern supplied as an argument. - - - - - - - Text.All returns a ConstraintBuilder, which will apply - the following constraint to all members of a collection, - succeeding if all of them succeed. - - - - - The List class is a helper class with properties and methods - that supply a number of constraints used with lists and collections. - - - - - List.Map returns a ListMapper, which can be used to map - the original collection to another collection. - - - - - - - ListMapper is used to transform a collection used as an actual argument - producing another collection to be used in the assertion. - - - - - Construct a ListMapper based on a collection - - The collection to be transformed - - - - Produces a collection containing all the values of a property - - The collection of property values - - - - - Summary description for HasNoPrefixB. - - - - - Returns a new ConstraintBuilder, which will apply the - following constraint to a named property of the object - being tested. - - The name of the property - - - - Returns a new PropertyConstraint checking for the - existence of a particular property value. - - The name of the property to look for - The expected value of the property - - - - Returns a new PropertyConstraint for the Length property - - - - - - - Returns a new PropertyConstraint or the Count property - - - - - - - Returns a new CollectionContainsConstraint checking for the - presence of a particular object in the collection. - - The expected object - - - - Has.No returns a ConstraintBuilder that negates - the constraint that follows it. 
Has.AllItems returns a ConstraintBuilder that applies the following constraint to all members of a collection, succeeding if all of them succeed. Has.Some (a synonym for Has.Item) succeeds if any member succeeds, and Has.None succeeds only if none of them succeed. A nested class restricts the keywords that may appear after Has.No to a builder conditioned to apply the following constraint to a named property, and a constraint that succeeds if the expected object is not contained in a collection.

The Assert class contains the static methods that implement the most common assertions used in NUnit. Its constructor is protected, so no instances are created but other static methods can still be added by inheriting from it, and it hides Equals and ReferenceEquals behind methods that throw an AssertionException so they cannot be called by mistake in place of the assertion methods. Unless noted otherwise, every assertion comes in three overloads: one taking only the values under test, one adding a failure message, and one adding a failure message plus formatting arguments.

- Assert.IsTrue and Assert.IsFalse assert that a condition is true or false, throwing an AssertionException otherwise.
- Assert.IsNotNull and Assert.IsNull verify that an object is not null or is null.
- Assert.IsNaN verifies that a double is an NaN value.
- Assert.IsEmpty and Assert.IsNotEmpty assert that a string is (or is not) equal to string.Empty, or that an array, list or other ICollection is (or is not) empty.
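A short illustrative sketch of these state assertions; the keys and collections are invented for the example.

```csharp
using System.Collections.Generic;
using NUnit.Framework;

[TestFixture]
public class BasicAssertSketch
{
    [Test]
    public void Null_empty_and_boolean_checks()
    {
        string key = "urn:user:1";
        var pending = new List<string>();

        Assert.IsTrue(key.StartsWith("urn:"), "keys should be URNs");
        Assert.IsFalse(string.IsNullOrEmpty(key));
        Assert.IsNotNull(pending);
        Assert.IsEmpty(pending);        // empty ICollection
        Assert.IsNotEmpty(key);         // non-empty string
        Assert.IsNaN(double.NaN);       // NaN check for doubles
    }
}
```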
- Assert.IsAssignableFrom and Assert.IsNotAssignableFrom assert that an object may (or may not) be assigned a value of a given Type.
- Assert.IsInstanceOfType and Assert.IsNotInstanceOfType assert that an object is (or is not) an instance of a given type.
- Assert.AreEqual verifies that two values are equal and throws an AssertionException if they are not. Overloads cover ints, longs, uints, ulongs and decimals; doubles and floats are compared within a delta, the maximum acceptable difference between the expected and actual values (if the expected value is infinity the delta is ignored). For objects, two values are considered equal if both are null or both have the same value; non-numeric types are compared with Equals, and arrays are compared element by element using the same rules.
- Assert.AreNotEqual asserts that two values are not equal, with the same set of overloads (objects, ints, longs, uints, ulongs, decimals, floats, doubles).
- Assert.AreSame and Assert.AreNotSame assert that two objects do (or do not) refer to the same object.

Each of these again takes expected and actual values, with optional message and formatting-argument overloads.
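The delta overload is what makes floating-point comparisons practical; a small sketch with an arbitrary tolerance and invented values.

```csharp
using NUnit.Framework;

[TestFixture]
public class EqualityAssertSketch
{
    [Test]
    public void Value_delta_and_reference_equality()
    {
        Assert.AreEqual(42, 42);                       // ints compared by value
        Assert.AreEqual(0.333, 1.0 / 3.0, 0.001);      // doubles compared within a delta
        Assert.AreNotEqual("master", "slave");

        var client = new object();
        var alias = client;
        Assert.AreSame(client, alias);                 // same reference
        Assert.AreNotSame(client, new object());       // different references
    }
}
```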
- Assert.Greater verifies that the first value is greater than the second, and Assert.Less verifies that the first value is less than the second; each throws an AssertionException otherwise and is overloaded for each of the supported types, with the usual message and formatting-argument variants.
- Assert.Contains asserts that an expected object is contained in a list.
- Assert.Fail throws an AssertionException, optionally initialized with a message and formatting arguments; it is used internally by the other assertion methods. Assert.Ignore throws the exception that causes the test to be reported as ignored, again with optional message and arguments.
- Assert.DoAssert tests the condition asserted by an IAsserter and throws an assertion exception on failure; the use of asserters for extending NUnit has been replaced by constraints, so the method is marked obsolete.
- Assert.That applies a constraint to an actual value, succeeding if the constraint is satisfied and throwing an assertion exception on failure, with optional message and formatting arguments; further overloads simply assert that a boolean condition is true.
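A brief sketch of the relational asserts next to the constraint form of the same check; as above, the SyntaxHelpers namespace and the Assert.That(actual, constraint) argument order are assumptions about this particular NUnit vintage, and the values are illustrative.

```csharp
using NUnit.Framework;
using NUnit.Framework.SyntaxHelpers; // assumed location of Is in this NUnit vintage

[TestFixture]
public class ComparisonAssertSketch
{
    [Test]
    public void Relational_checks_and_constraint_form()
    {
        int queued = 5, processed = 3;

        Assert.Greater(queued, processed);
        Assert.Less(processed, queued);
        Assert.Contains(3, new[] { 1, 2, 3 });          // expected item first, list second

        // Same relation expressed through a constraint:
        Assert.That(queued, Is.GreaterThan(processed));

        if (queued < 0)
            Assert.Fail("queue length can never be negative");
    }
}
```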
- Assert.GreaterOrEqual verifies that the first value is greater than or equal to the second, and Assert.LessOrEqual verifies that the first value is less than or equal to the second; both throw an AssertionException otherwise and are provided for the same types and message/argument variants as Greater and Less.
- Assert.Counter gets the number of assertions executed so far and resets the counter to zero.
- The MessageMatch enumeration indicates how the expected message parameter of an expected exception is to be used: expect an exact match, expect a message containing the parameter string, or match the regular expression provided as a parameter.
- ExpectedExceptionAttribute marks a test that is expected to throw. Constructors cover a non-specific exception, a given exception Type, the full name of the expected exception, and either of the latter two combined with the expected message text. Properties get or set the expected exception type, its full type name, the expected message text, the user message displayed in case of failure, the type of match to perform on the expected message, and the name of a method to be used as an exception handler.
CollectionAssert is a set of Assert methods operating on one or more collections; like Assert, it hides Equals and ReferenceEquals behind methods that throw an AssertionException so they cannot be called by mistake. The methods take IEnumerable arguments and have the usual message and formatting-argument overloads:
- AllItemsAreInstancesOfType asserts that all items in the collection are instances of the specified type.
- AllItemsAreNotNull asserts that no item in the collection is null.
- AllItemsAreUnique ensures that every object in the collection occurs once and only once.
- AreEqual asserts that two collections are exactly equal: the same count and the exact same objects in the same order; if an IComparer is supplied it is used to compare the objects.
- AreEquivalent asserts that two collections contain the same objects, in any order.
- AreNotEqual and AreNotEquivalent assert the corresponding negative conditions, with AreNotEqual also accepting an optional IComparer.
- Contains and DoesNotContain assert that a collection does (or does not) contain a given object.
- IsSubsetOf and IsNotSubsetOf assert that one collection is (or is not) a subset of the other.
- IsEmpty and IsNotEmpty assert that an array, list or other collection implementing IEnumerable is (or is not) empty.
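A short sketch exercising a few of these collection-level asserts; the collections are invented, and the argument order shown for IsSubsetOf (subset first, superset second) is an assumption based on the descriptions above.

```csharp
using NUnit.Framework;

[TestFixture]
public class CollectionAssertSketch
{
    [Test]
    public void Collection_level_checks()
    {
        string[] written  = { "a", "b", "c" };
        string[] readBack = { "c", "a", "b" };

        CollectionAssert.AllItemsAreNotNull(written);
        CollectionAssert.AllItemsAreUnique(written);
        CollectionAssert.AreEquivalent(written, readBack);        // same items, any order
        CollectionAssert.AreNotEqual(written, readBack);          // order differs, so not exactly equal
        CollectionAssert.Contains(readBack, "b");
        CollectionAssert.IsSubsetOf(new[] { "a", "b" }, written); // assumed order: subset, then superset
    }
}
```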
- - The IEnumerable superset to be considered - The IEnumerable subset to be considered - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Assert that an array, list or other collection is empty - - An array, list or other collection implementing IEnumerable - The message to be displayed on failure - Arguments to be used in formatting the message - - - - Assert that an array, list or other collection is empty - - An array, list or other collection implementing IEnumerable - The message to be displayed on failure - - - - Assert that an array,list or other collection is empty - - An array, list or other collection implementing IEnumerable - - - - Assert that an array, list or other collection is empty - - An array, list or other collection implementing IEnumerable - The message to be displayed on failure - Arguments to be used in formatting the message - - - - Assert that an array, list or other collection is empty - - An array, list or other collection implementing IEnumerable - The message to be displayed on failure - - - - Assert that an array,list or other collection is empty - - An array, list or other collection implementing IEnumerable - - - - NOTE: The use of asserters for extending NUnit has - now been replaced by the use of constraints. This - class is marked obsolete. - - AbstractAsserter is the base class for all asserters. - Asserters encapsulate a condition test and generation - of an AssertionException with a tailored message. They - are used by the Assert class as helper objects. - - User-defined asserters may be passed to the - Assert.DoAssert method in order to implement - extended asserts. - - - - - NOTE: The use of asserters for extending NUnit has - now been replaced by the use of constraints. This - interface is marked obsolete. - - The interface implemented by an asserter. Asserters - encapsulate a condition test and generation of an - AssertionException with a tailored message. They - are used by the Assert class as helper objects. - - User-defined asserters may be passed to the - Assert.DoAssert method in order to implement - extended asserts. - - - - - Test the condition for the assertion. - - True if the test succeeds - - - - Return the message giving the failure reason. - The return value is unspecified if no failure - has occured. - - - - - The user-defined message for this asserter. - - - - - Arguments to use in formatting the user-defined message. - - - - - Our failure message object, initialized as needed - - - - - Constructs an AbstractAsserter - - The message issued upon failure - Arguments to be used in formatting the message - - - - Test method to be implemented by derived types. - Default always succeeds. - - True if the test succeeds - - - - AssertionFailureMessage object used internally - - - - - Message related to a failure. If no failure has - occured, the result is unspecified. - - - - - The Assertion class is obsolete and has been - replaced by the Assert class. - - - - - Asserts that a condition is true. If it isn't it throws - an . - - The message to display is the condition - is false - The evaluated condition - - - - Asserts that a condition is true. If it isn't it throws - an . - - The evaluated condition - - - - /// Asserts that two doubles are equal concerning a delta. If the - expected value is infinity then the delta value is ignored. 
- - The expected value - The actual value - The maximum acceptable difference between the - the expected and the actual - - - - /// Asserts that two singles are equal concerning a delta. If the - expected value is infinity then the delta value is ignored. - - The expected value - The actual value - The maximum acceptable difference between the - the expected and the actual - - - Asserts that two objects are equal. If they are not - an is thrown. - - - Asserts that two ints are equal. If they are not - an is thrown. - - - Asserts that two ints are equal. If they are not - an is thrown. - - - Asserts that two doubles are equal concerning a delta. - If the expected value is infinity then the delta value is ignored. - - - - Asserts that two floats are equal concerning a delta. - If the expected value is infinity then the delta value is ignored. - - - - - Asserts that two objects are equal. Two objects are considered - equal if both are null, or if both have the same value. Numeric - types are compared via string comparision on their contents to - avoid problems comparing values between different types. All - non-numeric types are compared by using the Equals method. - If they are not equal an is thrown. - - - - Asserts that an object isn't null. - - - Asserts that an object isn't null. - - - Asserts that an object is null. - - - Asserts that an object is null. - - - Asserts that two objects refer to the same object. If they - are not the same an is thrown. - - - - Asserts that two objects refer to the same object. - If they are not an is thrown. - - - - Fails a test with no message. - - - Fails a test with the given message. - - - - Thrown when an assertion failed. - - - - - The error message that explains - the reason for the exception - - - The error message that explains - the reason for the exception - The exception that caused the - current exception - - - - Serialization Constructor - - - - - AssertionFailureMessage encapsulates a failure message - issued as a result of an Assert failure. - - - - - Number of characters before a highlighted position before - clipping will occur. Clipped text is replaced with an - elipsis "..." - - - - - Number of characters after a highlighted position before - clipping will occur. Clipped text is replaced with an - elipsis "..." - - - - - Prefix used to start an expected value line. - Must be same length as actualPrefix. - - - - - Prefix used to start an actual value line. - Must be same length as expectedPrefix. - - - - - Construct an AssertionFailureMessage with a message - and optional arguments. - - - - - - - Construct an empty AssertionFailureMessage - - - - - Add an expected value line to the message containing - the text provided as an argument. - - Text describing what was expected. - - - - Add an actual value line to the message containing - the text provided as an argument. - - Text describing the actual value. - - - - Add an expected value line to the message containing - a string representation of the object provided. - - An object representing the expected value - - - - Add an expected value line to the message containing a double - and the tolerance used in making the comparison. - - The expected value - The tolerance specified in the Assert - - - - Add an actual value line to the message containing - a string representation of the object provided. 
- - An object representing what was actually found - - - - Display two lines that communicate the expected value, and the actual value - - The expected value - The actual value found - - - - Display two lines that communicate the expected value, the actual value and - the tolerance used in comparing two doubles. - - The expected value - The actual value found - The tolerance specified in the Assert - - - - Draws a marker under the expected/actual strings that highlights - where in the string a mismatch occurred. - - The position of the mismatch - - - - Reports whether the string lengths are the same or different, and - what the string lengths are. - - The expected string - The actual string value - - - - Called to create additional message lines when two objects have been - found to be unequal. If the inputs are strings, a special message is - rendered that can help track down where the strings are different, - based on differences in length, or differences in content. - - If the inputs are not strings, the ToString method of the objects - is used to show what is different about them. - - The expected value - The actual value - True if a case-insensitive comparison is being performed - - - - Called to create additional message lines when two doubles have been - found to be unequal, within the specified tolerance. - - - - - Constructs a message that can be displayed when the content of two - strings are different, but the string lengths are the same. The - message will clip the strings to a reasonable length, centered - around the first position where they are mismatched, and draw - a line marking the position of the difference to make comparison - quicker. - - The expected string value - The actual string value - True if a case-insensitive comparison is being performed - - - - Display a standard message showing the differences found between - two arrays that were expected to be equal. - - The expected array value - The actual array value - The index at which a difference was found - - - - Display a standard message showing the differences found between - two collections that were expected to be equal. - - The expected collection value - The actual collection value - The index at which a difference was found - - - - Get an array of indices representing the point in a collection or - array corresponding to a single int index into the collection. - - The collection to which the indices apply - Index in the collection - Array of indices - - - - Displays elements from a list on a line - - Text to prefix the line with - The list of items to display - The index in the list of the first element to display - The maximum number of elements to display - - - - Formats an object for display in a message line - - The object to be displayed - - - - - Tests two objects to determine if they are strings. - - - - - - - - Renders up to M characters before, and up to N characters after - the specified index position. If leading or trailing text is - clipped, and elipses "..." is added where the missing text would - be. - - Clips strings to limit previous or post newline characters, - since these mess up the comparison - - - - - - - - Shows the position two strings start to differ. Comparison - starts at the start index. - - - - - -1 if no mismatch found, or the index where mismatch found - - - - Turns CR, LF, or TAB into visual indicator to preserve visual marker - position. 
This is done by replacing the '\r' into '\\' and 'r' - characters, and the '\n' into '\\' and 'n' characters, and '\t' into - '\\' and 't' characters. - - Thus the single character becomes two characters for display. - - - - - - - Attribute used to apply a category to a test - - - - - The name of the category - - - - - Construct attribute for a given category - - The name of the category - - - - Protected constructor uses the Type name as the name - of the category. - - - - - The name of the category - - - - - Abstract base for Attributes that are used to include tests - in the test run based on environmental settings. - - - - - Constructor with no included items specified, for use - with named property syntax. - - - - - Constructor taking one or more included items - - Comma-delimited list of included items - - - - Name of the item that is needed in order for - a test to run. Multiple itemss may be given, - separated by a comma. - - - - - Name of the item to be excluded. Multiple items - may be given, separated by a comma. - - - - - The reason for including or excluding the test - - - - - PlatformAttribute is used to mark a test fixture or an - individual method as applying to a particular platform only. - - - - - Constructor with no platforms specified, for use - with named property syntax. - - - - - Constructor taking one or more platforms - - Comma-deliminted list of platforms - - - - CultureAttribute is used to mark a test fixture or an - individual method as applying to a particular Culture only. - - - - - Constructor with no cultures specified, for use - with named property syntax. - - - - - Constructor taking one or more cultures - - Comma-deliminted list of cultures - - - - MessageWriter is the abstract base for classes that write - constraint descriptions and messages in some form. The - class has separate methods for writing various components - of a message, allowing implementations to tailor the - presentation as needed. - - - - - Construct a MessageWriter given a culture - - - - - Method to write single line message with optional args, usually - written to precede the general failure message. - - The message to be written - Any arguments used in formatting the message - - - - Method to write single line message with optional args, usually - written to precede the general failure message, at a givel - indentation level. - - The indentation level of the message - The message to be written - Any arguments used in formatting the message - - - - Display Expected and Actual lines for a constraint. This - is called by MessageWriter's default implementation of - WriteMessageTo and provides the generic two-line display. - - The constraint that failed - - - - Display Expected and Actual lines for given values. This - method may be called by constraints that need more control over - the display of actual and expected values than is provided - by the default implementation. - - The expected value - The actual value causing the failure - - - - Display Expected and Actual lines for given values, including - a tolerance value on the Expected line. - - The expected value - The actual value causing the failure - The tolerance within which the test was made - - - - Display the expected and actual string values on separate lines. - If the mismatch parameter is >=0, an additional line is displayed - line containing a caret that points to the mismatch point. 
- - The expected string value - The actual string value - The point at which the strings don't match or -1 - If true, case is ignored in locating the point where the strings differ - If true, the strings should be clipped to fit the line - - - - Writes the text for a connector. - - The connector. - - - - Writes the text for a predicate. - - The predicate. - - - - Writes the text for an expected value. - - The expected value. - - - - Writes the text for a modifier - - The modifier. - - - - Writes the text for an actual value. - - The actual value. - - - - Writes the text for a generalized value. - - The value. - - - - Writes the text for a collection value, - starting at a particular point, to a max length - - The collection containing elements to write. - The starting point of the elements to write - The maximum number of elements to write - - - - Abstract method to get the max line length - - - - - Static methods used in creating messages - - - - - Static string used when strings are clipped - - - - - Returns the representation of a type as used in NUnitLite. - This is the same as Type.ToString() except for arrays, - which are displayed with their declared sizes. - - - - - - - Converts any control characters in a string - to their escaped representation. - - The string to be converted - The converted string - - - - Return the a string representation for a set of indices into an array - - Array of indices for which a string is needed - - - - Get an array of indices representing the point in a collection or - array corresponding to a single int index into the collection. - - The collection to which the indices apply - Index in the collection - Array of indices - - - - Clip a string to a given length, starting at a particular offset, returning the clipped - string with ellipses representing the removed parts - - The string to be clipped - The maximum permitted length of the result string - The point at which to start clipping - The clipped string - - - - Clip the expected and actual strings in a coordinated fashion, - so that they may be displayed together. - - - - - - - - - Shows the position two strings start to differ. Comparison - starts at the start index. - - The expected string - The actual string - The index in the strings at which comparison should start - Boolean indicating whether case should be ignored - -1 if no mismatch found, or the index where mismatch found - - - - Summary description for SetCultureAttribute. - - - - - PropertyAttribute is used to attach information to a test as a name/value pair.. - - - - - The property name - - - - - The property value - - - - - Construct a PropertyAttribute with a name and value - - The name of the property - The property value - - - - Constructor for use by inherited classes that use the - name of the type as the property name. - - - - - Gets the property name - - - - - Gets the property value - - - - - Construct given the name of a culture - - - - - - TextMessageWriter writes constraint descriptions and messages - in displayable form as a text stream. It tailors the display - of individual message components to form the standard message - format of NUnit assertion failure messages. - - - - - Prefix used for the expected value line of a message - - - - - Prefix used for the actual value line of a message - - - - - Length of a message prefix - - - - - Construct a TextMessageWriter - - - - - Construct a TextMessageWriter, specifying a user message - and optional formatting arguments. 
- - - - - - - Method to write single line message with optional args, usually - written to precede the general failure message, at a givel - indentation level. - - The indentation level of the message - The message to be written - Any arguments used in formatting the message - - - - Display Expected and Actual lines for a constraint. This - is called by MessageWriter's default implementation of - WriteMessageTo and provides the generic two-line display. - - The constraint that failed - - - - Display Expected and Actual lines for given values. This - method may be called by constraints that need more control over - the display of actual and expected values than is provided - by the default implementation. - - The expected value - The actual value causing the failure - - - - Display Expected and Actual lines for given values, including - a tolerance value on the expected line. - - The expected value - The actual value causing the failure - The tolerance within which the test was made - - - - Display the expected and actual string values on separate lines. - If the mismatch parameter is >=0, an additional line is displayed - line containing a caret that points to the mismatch point. - - The expected string value - The actual string value - The point at which the strings don't match or -1 - If true, case is ignored in string comparisons - If true, clip the strings to fit the max line length - - - - Writes the text for a connector. - - The connector. - - - - Writes the text for a predicate. - - The predicate. - - - - Write the text for a modifier. - - The modifier. - - - - Writes the text for an expected value. - - The expected value. - - - - Writes the text for an actual value. - - The actual value. - - - - Writes the text for a generalized value. - - The value. - - - - Writes the text for a collection value, - starting at a particular point, to a max length - - The collection containing elements to write. - The starting point of the elements to write - The maximum number of elements to write - - - - Write the generic 'Expected' line for a constraint - - The constraint that failed - - - - Write the generic 'Expected' line for a given value - - The expected value - - - - Write the generic 'Expected' line for a given value - and tolerance. - - The expected value - The tolerance within which the test was made - - - - Write the generic 'Actual' line for a constraint - - The constraint for which the actual value is to be written - - - - Write the generic 'Actual' line for a given value - - The actual value causing a failure - - - - Gets or sets the maximum line length for this writer - - - - - Basic Asserts on strings. - - - - - The Equals method throws an AssertionException. This is done - to make sure there is no mistake by calling this function. - - - - - - - override the default ReferenceEquals to throw an AssertionException. This - implementation makes sure there is no mistake in calling this function - as part of Assert. - - - - - - - Asserts that a string is found within another string. - - The expected string - The string to be examined - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string is found within another string. - - The expected string - The string to be examined - The message to display in case of failure - - - - Asserts that a string is found within another string. - - The expected string - The string to be examined - - - - Asserts that a string starts with another string. 
- - The expected string - The string to be examined - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string starts with another string. - - The expected string - The string to be examined - The message to display in case of failure - - - - Asserts that a string starts with another string. - - The expected string - The string to be examined - - - - Asserts that a string ends with another string. - - The expected string - The string to be examined - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string ends with another string. - - The expected string - The string to be examined - The message to display in case of failure - - - - Asserts that a string ends with another string. - - The expected string - The string to be examined - - - - Asserts that two strings are equal, without regard to case. - - The expected string - The actual string - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that two strings are equal, without regard to case. - - The expected string - The actual string - The message to display in case of failure - - - - Asserts that two strings are equal, without regard to case. - - The expected string - The actual string - - - - Asserts that a string matches an expected regular expression pattern. - - The expected expression - The actual string - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string matches an expected regular expression pattern. - - The expected expression - The actual string - The message to display in case of failure - - - - Asserts that a string matches an expected regular expression pattern. - - The expected expression - The actual string - - - - AssertionHelper is an optional base class for user tests, - allowing the use of shorter names for constraints and - asserts and avoiding conflict with the definition of - , from which it inherits much of its - behavior, in certain mock object frameworks. - - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. Works - identically to - - A Constraint to be applied - The actual value to test - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. Works - identically to - - A Constraint to be applied - The actual value to test - The message that will be displayed on failure - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. Works - identically to - - A Constraint to be applied - The actual value to test - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Asserts that a condition is true. If the condition is false the method throws - an . Works Identically to - . - - The evaluated condition - The message to display if the condition is false - Arguments to be used in formatting the message - - - - Asserts that a condition is true. If the condition is false the method throws - an . Works Identically to - . - - The evaluated condition - The message to display if the condition is false - - - - Asserts that a condition is true. If the condition is false the method throws - an . Works Identically to . - - The evaluated condition - - - - Returns a ListMapper based on a collection. 
- - The original collection - - - - - Summary description for FileAssert. - - - - - The Equals method throws an AssertionException. This is done - to make sure there is no mistake by calling this function. - - - - - - - override the default ReferenceEquals to throw an AssertionException. This - implementation makes sure there is no mistake in calling this function - as part of Assert. - - - - - - - We don't actually want any instances of this object, but some people - like to inherit from it to add other static methods. Hence, the - protected constructor disallows any instances of this object. - - - - - Verifies that two Streams are equal. Two Streams are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - The expected Stream - The actual Stream - The message to display if Streams are not equal - Arguments to be used in formatting the message - - - - Verifies that two Streams are equal. Two Streams are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - The expected Stream - The actual Stream - The message to display if objects are not equal - - - - Verifies that two Streams are equal. Two Streams are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - The expected Stream - The actual Stream - - - - Verifies that two files are equal. Two files are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - A file containing the value that is expected - A file containing the actual value - The message to display if Streams are not equal - Arguments to be used in formatting the message - - - - Verifies that two files are equal. Two files are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - A file containing the value that is expected - A file containing the actual value - The message to display if objects are not equal - - - - Verifies that two files are equal. Two files are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - A file containing the value that is expected - A file containing the actual value - - - - Verifies that two files are equal. Two files are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - The message to display if Streams are not equal - Arguments to be used in formatting the message - - - - Verifies that two files are equal. Two files are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - The message to display if objects are not equal - - - - Verifies that two files are equal. Two files are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - - - - Asserts that two Streams are not equal. If they are equal - an is thrown. 
- - The expected Stream - The actual Stream - The message to be displayed when the two Stream are the same. - Arguments to be used in formatting the message - - - - Asserts that two Streams are not equal. If they are equal - an is thrown. - - The expected Stream - The actual Stream - The message to be displayed when the Streams are the same. - - - - Asserts that two Streams are not equal. If they are equal - an is thrown. - - The expected Stream - The actual Stream - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - A file containing the value that is expected - A file containing the actual value - The message to display if Streams are not equal - Arguments to be used in formatting the message - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - A file containing the value that is expected - A file containing the actual value - The message to display if objects are not equal - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - A file containing the value that is expected - A file containing the actual value - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - The message to display if Streams are not equal - Arguments to be used in formatting the message - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - The message to display if objects are not equal - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - - - - Thrown when an assertion failed. - - - - - - - The error message that explains - the reason for the exception - The exception that caused the - current exception - - - - Serialization Constructor - - - - - GlobalSettings is a place for setting default values used - by the framework in performing asserts. - - - - - Default tolerance for floating point equality - - - - - Obsolete class, formerly used to identify tests through - inheritance. Avoid using this class for new tests. - - - - - Method called immediately before running the test. - - - - - Method Called immediately after running the test. It is - guaranteed to be called, even if an exception is thrown. - - - - - Attribute used to mark a class that contains one-time SetUp - and/or TearDown methods that apply to all the tests in a - namespace or an assembly. - - - - - SetUpFixtureAttribute is used to identify a SetUpFixture - - - - - Attribute used to mark a static (shared in VB) property - that returns a list of tests. - - - - - Attribute used to identify a method that is called - immediately after each test is run. The method is - guaranteed to be called, even if an exception is thrown. - - - - - Adding this attribute to a method within a - class makes the method callable from the NUnit test runner. There is a property - called Description which is optional which you can provide a more detailed test - description. This class cannot be inherited. 
- - - - [TestFixture] - public class Fixture - { - [Test] - public void MethodToTest() - {} - - [Test(Description = "more detailed description")] - publc void TestDescriptionMethod() - {} - } - - - - - - Descriptive text for this test - - - - - [TestFixture] - public class ExampleClass - {} - - - - - Descriptive text for this fixture - - - - - Attribute used to identify a method that is - called before any tests in a fixture are run. - - - - - Attribute used to identify a method that is called after - all the tests in a fixture have run. The method is - guaranteed to be called, even if an exception is thrown. - - - - - Attribute used to mark a test that is to be ignored. - Ignored tests result in a warning message when the - tests are run. - - - - - Constructs the attribute without giving a reason - for ignoring the test. - - - - - Constructs the attribute giving a reason for ignoring the test - - The reason for ignoring the test - - - - The reason for ignoring a test - - - - - ExplicitAttribute marks a test or test fixture so that it will - only be run if explicitly executed from the gui or command line - or if it is included by use of a filter. The test will not be - run simply because an enclosing suite is run. - - - - - Default constructor - - - - - Constructor with a reason - - The reason test is marked explicit - - - - The reason test is marked explicit - - - - - Attribute used to provide descriptive text about a - test case or fixture. - - - - - Construct the attribute - - Text describing the test - - - - Gets the test description - - - - - Interface implemented by a user fixture in order to - validate any expected exceptions. It is only called - for test methods marked with the ExpectedException - attribute. - - - - - Method to handle an expected exception - - The exception to be handled - - - diff --git a/lib/tests/nunit.util.dll b/lib/tests/nunit.util.dll deleted file mode 100644 index 71ca49bc..00000000 Binary files a/lib/tests/nunit.util.dll and /dev/null differ diff --git a/license.txt b/license.txt new file mode 100644 index 00000000..76e55b16 --- /dev/null +++ b/license.txt @@ -0,0 +1,70 @@ +ServiceStack +Copyright (c) 2013 Service Stack +=============================================================================== + +This program is free software: you can redistribute it and/or modify it +under the terms of the GNU Affero General Public License as published by the +Free Software Foundation, either version 3 of the License, see +http://www.gnu.org/licenses/agpl-3.0.html. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. + + +FOSS License Exception +=============================================================================== + +This Exception applies to open source applications built with ServiceStack and +ServiceStack extensions ("The Software"), and to open source Derivative Works of +the Software, that use the Software under the terms of GNU Affero General +Public License, version 3 ("AGPLv3"). 
The Exception extends AGPLv3 by providing +additional grants that allow developers of FOSS applications to include ServiceStack +with their FOSS applications in combination with other software licensed under +the licenses from the "Open Source License List" below, provided that: + +You obey the AGPLv3 terms for the Software and the Derivative Work, except for +the separate parts of the Derivative Work ("Additions") which constitute independent +work and are not derived from the Software. + + - All Additions are distributed subject to one of the licenses listed below. + - Your software distribution provides complete source code for the Additions. + - The Derivative Work and its Additions are intended for use in end-user applications + and do not constitute software intended for use by software developers, such as + software libraries, components, and development kits. + - If you violate any of the terms in this Exception, you lose all rights granted + to you by the Exception and revert to the terms of AGPLv3. + +Service Stack reserves all rights not expressly granted in these terms and conditions. + +Open Source License List + +Name Version +Academic Free License 2.0 +Apache Software License 2.0 +Apple Public Source License 2.0 +Artistic license From Perl 5.8.0 +BSD license July 22 1999 +Common Development and Distribution License (CDDL) 1.0 +Common Public License 1.0 +Eclipse Public License 1.0 +Educational Community License 2.0 +European Union Public License (EUPL) 1.1 +GNU General Public License (GPL) 2.0 +GNU Library or "Lesser" General Public License (LGPL) 3.0 +Jabber Open Source License 1.0 +MIT License (As listed in file MIT-License.txt) - +Mozilla Public License (MPL) 1.0/1.1 +Open Software License 2.0 +OpenSSL license (with original SSLeay license) 2003 (1998) +University of Illinois/NCSA Open Source License - +W3C License 2001 +X11 License 2001 +Zlib/libpng License - + + + +Commercial License +=========================================================================== +In addition to this license, ServiceStack is offered under a commercial license. +Contact team@servicestack.net for details.
diff --git a/src/.nuget/NuGet.config b/src/.nuget/NuGet.config deleted file mode 100644 index 67f8ea04..00000000 --- a/src/.nuget/NuGet.config +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/src/.nuget/NuGet.exe b/src/.nuget/NuGet.exe deleted file mode 100644 index b5c8886d..00000000 Binary files a/src/.nuget/NuGet.exe and /dev/null differ diff --git a/src/.nuget/NuGet.targets b/src/.nuget/NuGet.targets deleted file mode 100644 index 4cf24f51..00000000 --- a/src/.nuget/NuGet.targets +++ /dev/null @@ -1,77 +0,0 @@ - - - - $(MSBuildProjectDirectory)\..\ - - - - - $([System.IO.Path]::Combine($(SolutionDir), ".nuget")) - $([System.IO.Path]::Combine($(ProjectDir), "packages.config")) - $([System.IO.Path]::Combine($(SolutionDir), "packages")) - - - - - $(SolutionDir).nuget - packages.config - $(SolutionDir)packages - - - - - $(NuGetToolsPath)\nuget.exe - "$(NuGetExePath)" - mono --runtime=v4.0.30319 $(NuGetExePath) - - $(TargetDir.Trim('\\')) - - - "" - - - false - - - false - - - $(NuGetCommand) install "$(PackagesConfig)" -source $(PackageSources) -o "$(PackagesDir)" - $(NuGetCommand) pack "$(ProjectPath)" -p Configuration=$(Configuration) -o "$(PackageOutputDir)" -symbols - - - - RestorePackages; - $(BuildDependsOn); - - - - - $(BuildDependsOn); - BuildPackage; - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/src/Directory.Build.props b/src/Directory.Build.props new file mode 100644 index 00000000..2a6ba4e0 --- /dev/null +++ b/src/Directory.Build.props @@ -0,0 +1,54 @@ + + + + 6.0.3 + ServiceStack + ServiceStack, Inc. + © 2008-2018 ServiceStack, Inc + true + https://github.com/ServiceStack/ServiceStack.Redis + https://servicestack.net/terms + https://servicestack.net/img/logo-64.png + https://docs.servicestack.net/release-notes-history + git + https://github.com/ServiceStack/ServiceStack.Redis.git + embedded + latest + true + true + false + + + + true + true + + + + $(DefineConstants);NETFX;NET45;NET472 + True + False + ../servicestack.snk + + + + $(DefineConstants);NETSTANDARD;NETSTANDARD2_0 + + + + $(DefineConstants);NET6_0;NET6_0_OR_GREATER + + + + $(DefineConstants);NETCORE;NETCORE_SUPPORT + + + + + + + + DEBUG + + + diff --git a/src/ServiceStack.Redis.sln b/src/ServiceStack.Redis.sln index 7d72534f..c2ea43b7 100644 --- a/src/ServiceStack.Redis.sln +++ b/src/ServiceStack.Redis.sln @@ -1,63 +1,103 @@  -Microsoft Visual Studio Solution File, Format Version 11.00 -# Visual Studio 2010 +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29721.120 +MinimumVisualStudioVersion = 10.0.40219.1 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Build", "Build", "{38F69F8F-9303-4BAF-B081-D28339163E07}" ProjectSection(SolutionItems) = preProject + ..\build\build-core.proj = ..\build\build-core.proj ..\build\build.bat = ..\build\build.bat - ..\build\NuGetPack.cmd = ..\build\NuGetPack.cmd - ..\NuGet\servicestack.redis.nuspec = ..\NuGet\servicestack.redis.nuspec + ..\build\build.proj = ..\build\build.proj + ..\build\build.tasks = ..\build\build.tasks + Directory.Build.props = Directory.Build.props + ..\tests\Directory.Build.props = ..\tests\Directory.Build.props + ..\README.md = ..\README.md + ServiceStack.Redis\ServiceStack.Redis.Core.csproj = ServiceStack.Redis\ServiceStack.Redis.Core.csproj EndProjectSection EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ServiceStack.Redis", "ServiceStack.Redis\ServiceStack.Redis.csproj", 
"{AF99F19B-4C04-4F58-81EF-B092F1FCC540}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ServiceStack.Redis", "ServiceStack.Redis\ServiceStack.Redis.csproj", "{AF99F19B-4C04-4F58-81EF-B092F1FCC540}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ServiceStack.Redis.Tests", "..\tests\ServiceStack.Redis.Tests\ServiceStack.Redis.Tests.csproj", "{951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ServiceStack.Redis.Tests", "..\tests\ServiceStack.Redis.Tests\ServiceStack.Redis.Tests.csproj", "{951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ServiceStack.Redis.Tests.Sentinel", "..\tests\ServiceStack.Redis.Tests.Sentinel\ServiceStack.Redis.Tests.Sentinel.csproj", "{91C55091-A946-49B5-9517-8794EBCC5784}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ServiceStack.Redis.Benchmark", "..\tests\ServiceStack.Redis.Benchmark\ServiceStack.Redis.Benchmark.csproj", "{959CA5FE-6525-4EEF-86CA-F4978BEFF14F}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Console.Tests", "..\tests\Console.Tests\Console.Tests.csproj", "{56DEDC64-B349-4150-BE9C-5805D831678D}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 - MonoTouch|Any CPU = MonoTouch|Any CPU - MonoTouch|x86 = MonoTouch|x86 Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 Release|x86 = Release|x86 - STATIC_ONLY NO_EXPRESSIONS|Any CPU = STATIC_ONLY NO_EXPRESSIONS|Any CPU - STATIC_ONLY NO_EXPRESSIONS|x86 = STATIC_ONLY NO_EXPRESSIONS|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|Any CPU.Build.0 = Debug|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x86.ActiveCfg = Debug|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x86.Build.0 = Debug|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.MonoTouch|Any CPU.ActiveCfg = MonoTouch|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.MonoTouch|Any CPU.Build.0 = MonoTouch|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.MonoTouch|x86.ActiveCfg = MonoTouch|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.MonoTouch|x86.Build.0 = MonoTouch|x86 + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x64.ActiveCfg = Debug|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x64.Build.0 = Debug|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x86.ActiveCfg = Debug|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x86.Build.0 = Debug|Any CPU {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|Any CPU.ActiveCfg = Release|Any CPU {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|Any CPU.Build.0 = Release|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x86.ActiveCfg = Release|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x86.Build.0 = Release|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.STATIC_ONLY NO_EXPRESSIONS|Any CPU.ActiveCfg = STATIC_ONLY NO_EXPRESSIONS|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.STATIC_ONLY NO_EXPRESSIONS|Any CPU.Build.0 = STATIC_ONLY NO_EXPRESSIONS|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.STATIC_ONLY NO_EXPRESSIONS|x86.ActiveCfg = STATIC_ONLY NO_EXPRESSIONS|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.STATIC_ONLY NO_EXPRESSIONS|x86.Build.0 = STATIC_ONLY NO_EXPRESSIONS|x86 + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x64.ActiveCfg = Release|Any CPU + 
{AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x64.Build.0 = Release|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x86.ActiveCfg = Release|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x86.Build.0 = Release|Any CPU {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|Any CPU.Build.0 = Debug|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x86.ActiveCfg = Debug|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x86.Build.0 = Debug|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.MonoTouch|Any CPU.ActiveCfg = MonoTouch|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.MonoTouch|Any CPU.Build.0 = MonoTouch|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.MonoTouch|x86.ActiveCfg = MonoTouch|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.MonoTouch|x86.Build.0 = MonoTouch|x86 + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x64.ActiveCfg = Debug|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x64.Build.0 = Debug|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x86.ActiveCfg = Debug|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x86.Build.0 = Debug|Any CPU {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|Any CPU.ActiveCfg = Release|Any CPU {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|Any CPU.Build.0 = Release|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x86.ActiveCfg = Release|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x86.Build.0 = Release|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.STATIC_ONLY NO_EXPRESSIONS|Any CPU.ActiveCfg = STATIC_ONLY NO_EXPRESSIONS|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.STATIC_ONLY NO_EXPRESSIONS|Any CPU.Build.0 = STATIC_ONLY NO_EXPRESSIONS|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.STATIC_ONLY NO_EXPRESSIONS|x86.ActiveCfg = STATIC_ONLY NO_EXPRESSIONS|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.STATIC_ONLY NO_EXPRESSIONS|x86.Build.0 = STATIC_ONLY NO_EXPRESSIONS|x86 + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x64.ActiveCfg = Release|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x64.Build.0 = Release|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x86.ActiveCfg = Release|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x86.Build.0 = Release|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|Any CPU.Build.0 = Debug|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|x64.ActiveCfg = Debug|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|x64.Build.0 = Debug|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|x86.ActiveCfg = Debug|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Release|Any CPU.ActiveCfg = Release|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Release|Any CPU.Build.0 = Release|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Release|x64.ActiveCfg = Release|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Release|x64.Build.0 = Release|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Release|x86.ActiveCfg = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|x64.ActiveCfg = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|x64.Build.0 = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|x86.ActiveCfg = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|x86.Build.0 = Debug|Any CPU + 
{959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|Any CPU.Build.0 = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|x64.ActiveCfg = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|x64.Build.0 = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|x86.ActiveCfg = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|x86.Build.0 = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|x64.ActiveCfg = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|x64.Build.0 = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|x86.ActiveCfg = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|x86.Build.0 = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|Any CPU.Build.0 = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|x64.ActiveCfg = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|x64.Build.0 = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|x86.ActiveCfg = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {EDB2082E-E2C1-4E9D-9A60-F937634254A6} + EndGlobalSection EndGlobal diff --git a/src/ServiceStack.Redis.sln.DotSettings b/src/ServiceStack.Redis.sln.DotSettings index 11f2c267..28747c5c 100644 --- a/src/ServiceStack.Redis.sln.DotSettings +++ b/src/ServiceStack.Redis.sln.DotSettings @@ -1,3 +1,4 @@  + True <data><IncludeFilters /><ExcludeFilters /></data> <data /> \ No newline at end of file diff --git a/src/ServiceStack.Redis/BasicRedisClientManager.Async.cs b/src/ServiceStack.Redis/BasicRedisClientManager.Async.cs new file mode 100644 index 00000000..1b2bd382 --- /dev/null +++ b/src/ServiceStack.Redis/BasicRedisClientManager.Async.cs @@ -0,0 +1,180 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Caching; +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + /// + /// Provides thread-safe retrieval of redis clients since each client is a new one. 
+ /// Allows the configuration of different ReadWrite and ReadOnly hosts + /// + public partial class BasicRedisClientManager + : IRedisClientsManagerAsync, ICacheClientAsync + { + private ValueTask GetCacheClientAsync(in CancellationToken _) + => new RedisClientManagerCacheClient(this).AsValueTaskResult(); + + private ValueTask GetReadOnlyCacheClientAsync(in CancellationToken _) + => ConfigureRedisClientAsync(this.GetReadOnlyClientImpl()).AsValueTaskResult(); + + private IRedisClientAsync ConfigureRedisClientAsync(IRedisClientAsync client) + => client; + + ValueTask IRedisClientsManagerAsync.GetCacheClientAsync(CancellationToken token) + => GetCacheClientAsync(token); + + ValueTask IRedisClientsManagerAsync.GetClientAsync(CancellationToken token) + => GetClientImpl().AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyCacheClientAsync(CancellationToken token) + => GetReadOnlyCacheClientAsync(token); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyClientAsync(CancellationToken token) + => GetReadOnlyClientImpl().AsValueTaskResult(); + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + async Task ICacheClientAsync.GetAsync(string key, CancellationToken token) + { + await using var client = await GetReadOnlyCacheClientAsync(token).ConfigureAwait(false); + return await client.GetAsync(key, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.FlushAllAsync(CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + await client.FlushAllAsync(token).ConfigureAwait(false); + } + + async Task> ICacheClientAsync.GetAllAsync(IEnumerable keys, CancellationToken token) + { + await using var client = await GetReadOnlyCacheClientAsync(token).ConfigureAwait(false); + return await client.GetAllAsync(keys, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAllAsync(IDictionary values, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + await client.SetAllAsync(values, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.RemoveAsync(string key, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.RemoveAsync(key, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.RemoveAllAsync(IEnumerable keys, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + await client.RemoveAllAsync(keys, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.IncrementAsync(string key, uint amount, CancellationToken token) + { + await using var 
client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.IncrementAsync(key, amount, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.DecrementAsync(string key, uint amount, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.DecrementAsync(key, amount, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.GetTimeToLiveAsync(string key, CancellationToken token) + { + await using var client = await GetReadOnlyCacheClientAsync(token).ConfigureAwait(false); + return await client.GetTimeToLiveAsync(key, token).ConfigureAwait(false); + } + + async IAsyncEnumerable ICacheClientAsync.GetKeysByPatternAsync(string pattern, [EnumeratorCancellation] CancellationToken token) + { + await using var client = await GetReadOnlyCacheClientAsync(token).ConfigureAwait(false); + await foreach (var key in client.GetKeysByPatternAsync(pattern, token).ConfigureAwait(false).WithCancellation(token)) + { + yield return key; + } + } + + async Task ICacheClientAsync.RemoveExpiredEntriesAsync(CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + await client.RemoveExpiredEntriesAsync(token).ConfigureAwait(false); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs b/src/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs index 43e42e21..aedf60eb 100644 --- a/src/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs +++ b/src/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs @@ -1,181 +1,138 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // 
Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; -using ServiceStack.CacheAccess; +using ServiceStack.Caching; namespace ServiceStack.Redis { /// /// BasicRedisClientManager for ICacheClient /// - /// For more interoperabilty I'm also implementing the ICacheClient on + /// For more interoperability I'm also implementing the ICacheClient on /// this cache client manager which has the affect of calling /// GetCacheClient() for all write operations and GetReadOnlyCacheClient() /// for the read ones. /// - /// This works well for master-slave replication scenarios where you have - /// 1 master that replicates to multiple read slaves. + /// This works well for master-replica replication scenarios where you have + /// 1 master that replicates to multiple read replicas. /// public partial class BasicRedisClientManager : ICacheClient { - public ICacheClient GetCacheClient() - { - return ConfigureRedisClient(this.GetClient()); - } - - public ICacheClient GetReadOnlyCacheClient() - { - return ConfigureRedisClient(this.GetReadOnlyClient()); - } + public ICacheClient GetCacheClient() => + new RedisClientManagerCacheClient(this); - private ICacheClient ConfigureRedisClient(IRedisClient client) - { - return client; - } + public ICacheClient GetReadOnlyCacheClient() => + ConfigureRedisClient(this.GetReadOnlyClientImpl()); - #region Implementation of ICacheClient + private ICacheClient ConfigureRedisClient(IRedisClient client) => client; public bool Remove(string key) { - using (var client = GetReadOnlyCacheClient()) - { - return client.Remove(key); - } + using var client = GetReadOnlyCacheClient(); + return client.Remove(key); } public void RemoveAll(IEnumerable keys) { - using (var client = GetCacheClient()) - { - client.RemoveAll(keys); - } + using var client = GetCacheClient(); + client.RemoveAll(keys); } public T Get(string key) { - using (var client = GetReadOnlyCacheClient()) - { - return client.Get(key); - } + using var client = GetReadOnlyCacheClient(); + return client.Get(key); } public long Increment(string key, uint amount) { - using (var client = GetCacheClient()) - { - return client.Increment(key, amount); - } + using var client = GetCacheClient(); + return client.Increment(key, amount); } public long Decrement(string key, uint amount) { - using (var client = GetCacheClient()) - { - return client.Decrement(key, amount); - } + using var client = GetCacheClient(); + return client.Decrement(key, amount); } public bool Add(string key, T value) { - using (var client = GetCacheClient()) - { - return client.Add(key, value); - } + using var client = GetCacheClient(); + return client.Add(key, value); } public bool Set(string key, T value) { - using (var client = GetCacheClient()) - { - return client.Set(key, value); - } + using var client = GetCacheClient(); + return client.Set(key, value); } public bool Replace(string key, T value) { - using (var client = GetCacheClient()) - { - return client.Replace(key, value); - } + using var client = GetCacheClient(); + return client.Replace(key, value); } public bool Add(string key, T value, DateTime expiresAt) { - using (var client = GetCacheClient()) - { - return client.Add(key, value, expiresAt); - } + using var client = GetCacheClient(); + return client.Add(key, value, expiresAt); } 
public bool Set(string key, T value, DateTime expiresAt) { - using (var client = GetCacheClient()) - { - return client.Set(key, value, expiresAt); - } + using var client = GetCacheClient(); + return client.Set(key, value, expiresAt); } public bool Replace(string key, T value, DateTime expiresAt) { - using (var client = GetCacheClient()) - { - return client.Replace(key, value, expiresAt); - } + using var client = GetCacheClient(); + return client.Replace(key, value, expiresAt); } public bool Add(string key, T value, TimeSpan expiresIn) { - using (var client = GetCacheClient()) - { - return client.Add(key, value, expiresIn); - } + using var client = GetCacheClient(); + return client.Add(key, value, expiresIn); } public bool Set(string key, T value, TimeSpan expiresIn) { - using (var client = GetCacheClient()) - { - return client.Set(key, value, expiresIn); - } + using var client = GetCacheClient(); + return client.Set(key, value, expiresIn); } public bool Replace(string key, T value, TimeSpan expiresIn) { - using (var client = GetCacheClient()) - { - return client.Replace(key, value, expiresIn); - } + using var client = GetCacheClient(); + return client.Replace(key, value, expiresIn); } public void FlushAll() { - using (var client = GetCacheClient()) - { - client.FlushAll(); - } + using var client = GetCacheClient(); + client.FlushAll(); } public IDictionary GetAll(IEnumerable keys) { - using (var client = GetReadOnlyCacheClient()) - { - return client.GetAll(keys); - } + using var client = GetReadOnlyCacheClient(); + return client.GetAll(keys); } - - #endregion } diff --git a/src/ServiceStack.Redis/BasicRedisClientManager.cs b/src/ServiceStack.Redis/BasicRedisClientManager.cs index 6927a68f..d42c4f51 100644 --- a/src/ServiceStack.Redis/BasicRedisClientManager.cs +++ b/src/ServiceStack.Redis/BasicRedisClientManager.cs @@ -1,163 +1,181 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; -using ServiceStack.Common.Web; +using System.Linq; +using System.Threading; +using ServiceStack.Logging; +using ServiceStack.Text; namespace ServiceStack.Redis { - /// - /// Provides thread-safe retrievel of redis clients since each client is a new one. - /// Allows the configuration of different ReadWrite and ReadOnly hosts - /// - public partial class BasicRedisClientManager - : IRedisClientsManager - { - private List ReadWriteHosts { get; set; } - private List ReadOnlyHosts { get; set; } + /// + /// Provides thread-safe retrieval of redis clients since each client is a new one. + /// Allows the configuration of different ReadWrite and ReadOnly hosts + /// + public partial class BasicRedisClientManager + : IRedisClientsManager, IRedisFailover, IHasRedisResolver + { + public static ILog Log = LogManager.GetLogger(typeof(BasicRedisClientManager)); + public int? ConnectTimeout { get; set; } + public int? SocketSendTimeout { get; set; } + public int? SocketReceiveTimeout { get; set; } + public int? IdleTimeOutSecs { get; set; } /// /// Gets or sets object key prefix. 
/// public string NamespacePrefix { get; set; } - private int readWriteHostsIndex; - private int readOnlyHostsIndex; + private int readWriteHostsIndex; + private int readOnlyHostsIndex; - public IRedisClientFactory RedisClientFactory { get; set; } + protected int RedisClientCounter = 0; - public int Db { get; private set; } + public Func ClientFactory { get; set; } + + public long? Db { get; private set; } public Action ConnectionFilter { get; set; } - public BasicRedisClientManager() : this(RedisNativeClient.DefaultHost) { } - - public BasicRedisClientManager(params string[] readWriteHosts) - : this(readWriteHosts, readWriteHosts) {} - - public BasicRedisClientManager(int initialDb, params string[] readWriteHosts) - : this(readWriteHosts, readWriteHosts, initialDb) {} - - /// - /// Hosts can be an IP Address or Hostname in the format: host[:port] - /// e.g. 127.0.0.1:6379 - /// default is: localhost:6379 - /// - /// The write hosts. - /// The read hosts. - public BasicRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts) - : this(readWriteHosts, readOnlyHosts, RedisNativeClient.DefaultDb) - { - } - - public BasicRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts, - int initalDb) - { - this.Db = initalDb; - - ReadWriteHosts = readWriteHosts.ToRedisEndPoints(); - ReadOnlyHosts = readOnlyHosts.ToRedisEndPoints(); - - this.RedisClientFactory = Redis.RedisClientFactory.Instance; - - this.OnStart(); - } - - protected virtual void OnStart() - { - this.Start(); - } - - /// - /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - /// - /// - public IRedisClient GetClient() - { - var nextHost = ReadWriteHosts[readWriteHostsIndex++ % ReadWriteHosts.Count]; - var client = RedisClientFactory.CreateRedisClient( - nextHost.Host, nextHost.Port); + public List> OnFailover { get; private set; } - if (this.ConnectTimeout != null) - { - client.ConnectTimeout = this.ConnectTimeout.Value; - } + public IRedisResolver RedisResolver { get; set; } - //Set database to userSpecified if different - if (Db != RedisNativeClient.DefaultDb) - { - client.Db = Db; - } + public BasicRedisClientManager() : this(RedisConfig.DefaultHost) { } - if (nextHost.RequiresAuth) - client.Password = nextHost.Password; + public BasicRedisClientManager(params string[] readWriteHosts) + : this(readWriteHosts, readWriteHosts) { } - client.NamespacePrefix = NamespacePrefix; - client.ConnectionFilter = ConnectionFilter; + public BasicRedisClientManager(int initialDb, params string[] readWriteHosts) + : this(readWriteHosts, readWriteHosts, initialDb) { } + + /// + /// Hosts can be an IP Address or Hostname in the format: host[:port] + /// e.g. 127.0.0.1:6379 + /// default is: localhost:6379 + /// + /// The write hosts. + /// The read hosts. + /// + public BasicRedisClientManager( + IEnumerable readWriteHosts, + IEnumerable readOnlyHosts, + long? initalDb = null) + : this(readWriteHosts.ToRedisEndPoints(), readOnlyHosts.ToRedisEndPoints(), initalDb) {} - return client; - } + public BasicRedisClientManager( + IEnumerable readWriteHosts, + IEnumerable readOnlyHosts, + long? 
initalDb = null) + { + this.Db = initalDb; + + RedisResolver = new RedisResolver(readWriteHosts, readOnlyHosts); + + this.OnFailover = new List>(); + + JsConfig.InitStatics(); + + this.OnStart(); + } + + protected virtual void OnStart() + { + this.Start(); + } + + /// + /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts + /// + /// + public IRedisClient GetClient() => GetClientImpl(); + private RedisClient GetClientImpl() + { + var client = InitNewClient(RedisResolver.CreateMasterClient(readWriteHostsIndex++)); + return client; + } - /// - /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - /// - /// - public virtual IRedisClient GetReadOnlyClient() - { - var nextHost = ReadOnlyHosts[readOnlyHostsIndex++ % ReadOnlyHosts.Count]; - var client = RedisClientFactory.CreateRedisClient( - nextHost.Host, nextHost.Port); + /// + /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. + /// + /// + public virtual IRedisClient GetReadOnlyClient() => GetReadOnlyClientImpl(); + private RedisClient GetReadOnlyClientImpl() + { + var client = InitNewClient(RedisResolver.CreateSlaveClient(readOnlyHostsIndex++)); + return client; + } + private RedisClient InitNewClient(RedisClient client) + { + client.Id = Interlocked.Increment(ref RedisClientCounter); + client.ConnectionFilter = ConnectionFilter; if (this.ConnectTimeout != null) - { client.ConnectTimeout = this.ConnectTimeout.Value; + if (this.SocketSendTimeout.HasValue) + client.SendTimeout = this.SocketSendTimeout.Value; + if (this.SocketReceiveTimeout.HasValue) + client.ReceiveTimeout = this.SocketReceiveTimeout.Value; + if (this.IdleTimeOutSecs.HasValue) + client.IdleTimeOutSecs = this.IdleTimeOutSecs.Value; + if (this.NamespacePrefix != null) + client.NamespacePrefix = NamespacePrefix; + if (Db != null && client.Db != Db) //Reset database to default if changed + client.ChangeDb(Db.Value); + + return client; + } + + public void SetAll(IDictionary values) + { + foreach (var entry in values) + { + Set(entry.Key, entry.Value); } + } - //Set database to userSpecified if different - if (Db != RedisNativeClient.DefaultDb) - { - client.Db = Db; - } + public void Start() + { + readWriteHostsIndex = 0; + readOnlyHostsIndex = 0; + } - if (nextHost.RequiresAuth) - client.Password = nextHost.Password; + public void FailoverTo(params string[] readWriteHosts) + { + FailoverTo(readWriteHosts, readWriteHosts); + } - client.NamespacePrefix = NamespacePrefix; - client.ConnectionFilter = ConnectionFilter; + public void FailoverTo(IEnumerable readWriteHosts, IEnumerable readOnlyHosts) + { + Interlocked.Increment(ref RedisState.TotalFailovers); - return client; - } - - public void SetAll(IDictionary values) - { - foreach (var entry in values) - { - Set(entry.Key, entry.Value); - } - } - - public void Start() - { - readWriteHostsIndex = 0; - readOnlyHostsIndex = 0; - } - - public void Dispose() - { - } - } + var masters = readWriteHosts.ToList(); + var replicas = readOnlyHosts.ToList(); + + Log.Info($"FailoverTo: {string.Join(",", masters)} : {string.Join(",", replicas)} Total: {RedisState.TotalFailovers}"); + + lock (this) + { + RedisResolver.ResetMasters(masters); + RedisResolver.ResetSlaves(replicas); + } + + Start(); + } + + public void Dispose() + { + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/BasicRedisResolver.cs b/src/ServiceStack.Redis/BasicRedisResolver.cs new file mode 100644 index 00000000..31986996 --- /dev/null +++ 
b/src/ServiceStack.Redis/BasicRedisResolver.cs @@ -0,0 +1,88 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using ServiceStack.Logging; + +namespace ServiceStack.Redis +{ + public class BasicRedisResolver : IRedisResolver, IRedisResolverExtended + { + static ILog log = LogManager.GetLogger(typeof(BasicRedisResolver)); + + public Func<RedisEndpoint, RedisClient> ClientFactory { get; set; } + + public int ReadWriteHostsCount { get; private set; } + public int ReadOnlyHostsCount { get; private set; } + + private RedisEndpoint[] masters; + private RedisEndpoint[] replicas; + + public RedisEndpoint[] Masters => masters; + public RedisEndpoint[] Replicas => replicas; + + public BasicRedisResolver(IEnumerable<RedisEndpoint> masters, IEnumerable<RedisEndpoint> replicas) + { + ResetMasters(masters.ToList()); + ResetSlaves(replicas.ToList()); + ClientFactory = RedisConfig.ClientFactory; + } + + public virtual void ResetMasters(IEnumerable<string> hosts) + { + ResetMasters(hosts.ToRedisEndPoints()); + } + + public virtual void ResetMasters(List<RedisEndpoint> newMasters) + { + if (newMasters == null || newMasters.Count == 0) + throw new Exception("Must provide at least 1 master"); + + masters = newMasters.ToArray(); + ReadWriteHostsCount = masters.Length; + + if (log.IsDebugEnabled) + log.Debug("New Redis Masters: " + string.Join(", ", masters.Map(x => x.GetHostString()))); + } + + public virtual void ResetSlaves(IEnumerable<string> hosts) + { + ResetSlaves(hosts.ToRedisEndPoints()); + } + + public virtual void ResetSlaves(List<RedisEndpoint> newReplicas) + { + replicas = (newReplicas ?? TypeConstants<RedisEndpoint>.EmptyList).ToArray(); + ReadOnlyHostsCount = replicas.Length; + + if (log.IsDebugEnabled) + log.Debug("New Redis Replicas: " + string.Join(", ", replicas.Map(x => x.GetHostString()))); + } + + public RedisClient CreateRedisClient(RedisEndpoint config, bool master) + { + return ClientFactory(config); + } + + public RedisEndpoint GetReadWriteHost(int desiredIndex) + { + return masters[desiredIndex % masters.Length]; + } + + public RedisEndpoint GetReadOnlyHost(int desiredIndex) + { + return ReadOnlyHostsCount > 0 + ? 
replicas[desiredIndex % replicas.Length] + : GetReadWriteHost(desiredIndex); + } + + public RedisClient CreateMasterClient(int desiredIndex) + { + return CreateRedisClient(GetReadWriteHost(desiredIndex), master: true); + } + + public RedisClient CreateSlaveClient(int desiredIndex) + { + return CreateRedisClient(GetReadOnlyHost(desiredIndex), master: false); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/BufferPool.cs b/src/ServiceStack.Redis/BufferPool.cs deleted file mode 100644 index 9e66a764..00000000 --- a/src/ServiceStack.Redis/BufferPool.cs +++ /dev/null @@ -1,78 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading; - -namespace ServiceStack.Redis -{ - /// - /// Courtesy of @marcgravell - /// http://code.google.com/p/protobuf-net/source/browse/trunk/protobuf-net/BufferPool.cs - /// - internal class BufferPool - { - internal static void Flush() - { - for (int i = 0; i < pool.Length; i++) - { - Interlocked.Exchange(ref pool[i], null); // and drop the old value on the floor - } - } - - private BufferPool() { } - const int PoolSize = 1000; //1.45MB - internal const int BufferLength = 1450; //MTU size - some headers - private static readonly object[] pool = new object[PoolSize]; - - internal static byte[] GetBuffer() - { - object tmp; - for (int i = 0; i < pool.Length; i++) - { - if ((tmp = Interlocked.Exchange(ref pool[i], null)) != null) - return (byte[])tmp; - } - return new byte[BufferLength]; - } - - internal static void ResizeAndFlushLeft(ref byte[] buffer, int toFitAtLeastBytes, int copyFromIndex, int copyBytes) - { - Debug.Assert(buffer != null); - Debug.Assert(toFitAtLeastBytes > buffer.Length); - Debug.Assert(copyFromIndex >= 0); - Debug.Assert(copyBytes >= 0); - - // try doubling, else match - int newLength = buffer.Length * 2; - if (newLength < toFitAtLeastBytes) newLength = toFitAtLeastBytes; - - var newBuffer = new byte[newLength]; - if (copyBytes > 0) - { - Buffer.BlockCopy(buffer, copyFromIndex, newBuffer, 0, copyBytes); - } - if (buffer.Length == BufferLength) - { - ReleaseBufferToPool(ref buffer); - } - buffer = newBuffer; - } - - internal static void ReleaseBufferToPool(ref byte[] buffer) - { - if (buffer == null) return; - if (buffer.Length == BufferLength) - { - for (int i = 0; i < pool.Length; i++) - { - if (Interlocked.CompareExchange(ref pool[i], buffer, null) == null) - { - break; // found a null; swapped it in - } - } - } - // if no space, just drop it on the floor - buffer = null; - } - - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/BufferedReader.Async.cs b/src/ServiceStack.Redis/BufferedReader.Async.cs new file mode 100644 index 00000000..39ad9d7e --- /dev/null +++ b/src/ServiceStack.Redis/BufferedReader.Async.cs @@ -0,0 +1,93 @@ +using ServiceStack.Redis.Internal; +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal sealed partial class BufferedReader + { + internal ValueTask ReadByteAsync(in CancellationToken token = default) + => _available > 0 ? 
ReadByteFromBuffer().AsValueTaskResult() : ReadByteSlowAsync(token); + + private ValueTask ReadByteSlowAsync(in CancellationToken token) + { + token.ThrowIfCancellationRequested(); + _offset = 0; +#if ASYNC_MEMORY + var pending = _source.ReadAsync(new Memory(_buffer), token); + if (!pending.IsCompletedSuccessfully) + return Awaited(this, pending); +#else + var pending = _source.ReadAsync(_buffer, 0, _buffer.Length, token); + if (pending.Status != TaskStatus.RanToCompletion) + return Awaited(this, pending); +#endif + + _available = pending.Result; + return (_available > 0 ? ReadByteFromBuffer() : -1).AsValueTaskResult(); + +#if ASYNC_MEMORY + static async ValueTask Awaited(BufferedReader @this, ValueTask pending) + { + @this._available = await pending.ConfigureAwait(false); + return @this._available > 0 ? @this.ReadByteFromBuffer() : -1; + } +#else + static async ValueTask Awaited(BufferedReader @this, Task pending) + { + @this._available = await pending.ConfigureAwait(false); + return @this._available > 0 ? @this.ReadByteFromBuffer() : -1; + } +#endif + } + + internal ValueTask ReadAsync(byte[] buffer, int offset, int count, in CancellationToken token = default) + => _available > 0 + ? ReadFromBuffer(buffer, offset, count).AsValueTaskResult() + : ReadSlowAsync(buffer, offset, count, token); + + private ValueTask ReadSlowAsync(byte[] buffer, int offset, int count, in CancellationToken token) + { + // if they're asking for more than we deal in, just step out of the way + if (count >= buffer.Length) + { +#if ASYNC_MEMORY + return _source.ReadAsync(new Memory(buffer, offset, count), token); +#else + return new ValueTask(_source.ReadAsync(buffer, offset, count, token)); +#endif + } + + // they're asking for less, so we could still have some left + _offset = 0; +#if ASYNC_MEMORY + var pending = _source.ReadAsync(new Memory(_buffer), token); + if (!pending.IsCompletedSuccessfully) + return Awaited(this, pending, buffer, offset, count); + + _available = pending.Result; // already checked status, this is fine + return (_available > 0 ? ReadFromBuffer(buffer, offset, count) : 0).AsValueTaskResult(); + + static async ValueTask Awaited(BufferedReader @this, ValueTask pending, byte[] buffer, int offset, int count) + { + @this._available = await pending.ConfigureAwait(false); + return @this._available > 0 ? @this.ReadFromBuffer(buffer, offset, count) : 0; + } +#else + var pending = _source.ReadAsync(_buffer, 0, _buffer.Length, token); + if (pending.Status != TaskStatus.RanToCompletion) + return Awaited(this, pending, buffer, offset, count); + + _available = pending.Result; // already checked status, this is fine + return (_available > 0 ? ReadFromBuffer(buffer, offset, count) : 0).AsValueTaskResult(); + + static async ValueTask Awaited(BufferedReader @this, Task pending, byte[] buffer, int offset, int count) + { + @this._available = await pending.ConfigureAwait(false); + return @this._available > 0 ? 
@this.ReadFromBuffer(buffer, offset, count) : 0; + } +#endif + } + } +} diff --git a/src/ServiceStack.Redis/BufferedReader.cs b/src/ServiceStack.Redis/BufferedReader.cs new file mode 100644 index 00000000..b5aa67df --- /dev/null +++ b/src/ServiceStack.Redis/BufferedReader.cs @@ -0,0 +1,81 @@ +using System; +using System.IO; + +namespace ServiceStack.Redis +{ + /// + /// BufferedReader is a minimal buffer implementation that provides + /// efficient sync and async access for byte-by-byte consumption; + /// like BufferedStream, but with the async part + /// + internal sealed partial class BufferedReader : IDisposable + { + private readonly Stream _source; + readonly byte[] _buffer; + private int _offset, _available; + public void Dispose() + { + _available = 0; + _source.Dispose(); + } + internal void Close() + { + _available = 0; + _source.Close(); + } + + internal BufferedReader(Stream source, int bufferSize) + { + _source = source; + _buffer = new byte[bufferSize]; + Reset(); + } + + internal void Reset() + { + _offset = _available = 0; + } + + internal int ReadByte() + => _available > 0 ? ReadByteFromBuffer() : ReadByteSlow(); + + private int ReadByteFromBuffer() + { + --_available; + return _buffer[_offset++]; + } + + private int ReadByteSlow() + { + _available = _source.Read(_buffer, _offset = 0, _buffer.Length); + return _available > 0 ? ReadByteFromBuffer() : -1; + } + + + private int ReadFromBuffer(byte[] buffer, int offset, int count) + { + // we have data in the buffer; hand it back + if (_available < count) count = _available; + Buffer.BlockCopy(_buffer, _offset, buffer, offset, count); + _available -= count; + _offset += count; + return count; + } + + internal int Read(byte[] buffer, int offset, int count) + => _available > 0 + ? ReadFromBuffer(buffer, offset, count) + : ReadSlow(buffer, offset, count); + + private int ReadSlow(byte[] buffer, int offset, int count) + { + // if they're asking for more than we deal in, just step out of the way + if (count >= buffer.Length) + return _source.Read(buffer, offset, count); + + // they're asking for less, so we could still have some left + _available = _source.Read(_buffer, _offset = 0, _buffer.Length); + return _available > 0 ? 
ReadFromBuffer(buffer, offset, count) : 0; + } + } +} diff --git a/src/ServiceStack.Redis/BufferedStream.cs b/src/ServiceStack.Redis/BufferedStream.cs new file mode 100644 index 00000000..2d745c5f --- /dev/null +++ b/src/ServiceStack.Redis/BufferedStream.cs @@ -0,0 +1,49 @@ +#if NETCORE +using System; +using System.IO; +using System.Net.Sockets; + +namespace ServiceStack.Redis +{ + // recommendation: mark this obsolete as it is incomplete, and no longer used; + // I've marked it obsolete in DEBUG to be sure +#if DEBUG + [Obsolete("Prefer System.IO.BufferedStream")] +#endif + public sealed class BufferedStream : Stream + { + Stream networkStream; + + public BufferedStream(Stream stream) + : this(stream, 0) {} + + public BufferedStream(Stream stream, int bufferSize) + { + networkStream = stream; + } + public override bool CanRead => networkStream.CanRead; + + public override bool CanSeek => networkStream.CanSeek; + + public override bool CanWrite => networkStream.CanWrite; + + public override long Position + { + get { return networkStream.Position; } + set { networkStream.Position = value; } + } + + public override long Length => networkStream.Length; + + public override int Read(byte[] buffer, int offset, int length) => networkStream.Read(buffer, offset, length); + + public override void Write(byte[] buffer, int offset, int length) => networkStream.Write(buffer, offset, length); + + public override void Flush() => networkStream.Flush(); + + public override void SetLength(long length) => networkStream.SetLength(length); + + public override long Seek(long position, SeekOrigin origin) => networkStream.Seek(position, origin); + } +} +#endif \ No newline at end of file diff --git a/src/ServiceStack.Redis/Commands.cs b/src/ServiceStack.Redis/Commands.cs index 37df50ea..bf4c7e62 100644 --- a/src/ServiceStack.Redis/Commands.cs +++ b/src/ServiceStack.Redis/Commands.cs @@ -1,3 +1,4 @@ +using System; using ServiceStack.Text; namespace ServiceStack.Redis @@ -30,6 +31,7 @@ public static class Commands public readonly static byte[] BgSave = "BGSAVE".ToUtf8Bytes(); public readonly static byte[] LastSave = "LASTSAVE".ToUtf8Bytes(); public readonly static byte[] Shutdown = "SHUTDOWN".ToUtf8Bytes(); + public readonly static byte[] NoSave = "NOSAVE".ToUtf8Bytes(); public readonly static byte[] BgRewriteAof = "BGREWRITEAOF".ToUtf8Bytes(); public readonly static byte[] Info = "INFO".ToUtf8Bytes(); @@ -37,8 +39,10 @@ public static class Commands public readonly static byte[] No = "NO".ToUtf8Bytes(); public readonly static byte[] One = "ONE".ToUtf8Bytes(); public readonly static byte[] ResetStat = "RESETSTAT".ToUtf8Bytes(); + public readonly static byte[] Rewrite = "REWRITE".ToUtf8Bytes(); public readonly static byte[] Time = "TIME".ToUtf8Bytes(); public readonly static byte[] Segfault = "SEGFAULT".ToUtf8Bytes(); + public readonly static byte[] Sleep = "SLEEP".ToUtf8Bytes(); public readonly static byte[] Dump = "DUMP".ToUtf8Bytes(); public readonly static byte[] Restore = "RESTORE".ToUtf8Bytes(); public readonly static byte[] Migrate = "MIGRATE".ToUtf8Bytes(); @@ -48,6 +52,16 @@ public static class Commands public readonly static byte[] Monitor = "MONITOR".ToUtf8Bytes(); //missing public readonly static byte[] Debug = "DEBUG".ToUtf8Bytes(); //missing public readonly static byte[] Config = "CONFIG".ToUtf8Bytes(); //missing + public readonly static byte[] Client = "CLIENT".ToUtf8Bytes(); + public readonly static byte[] List = "LIST".ToUtf8Bytes(); + public readonly static byte[] Kill = "KILL".ToUtf8Bytes(); + 
public readonly static byte[] Addr = "ADDR".ToUtf8Bytes(); + public readonly static byte[] Id = "ID".ToUtf8Bytes(); + public readonly static byte[] SkipMe = "SKIPME".ToUtf8Bytes(); + public readonly static byte[] SetName = "SETNAME".ToUtf8Bytes(); + public readonly static byte[] GetName = "GETNAME".ToUtf8Bytes(); + public readonly static byte[] Pause = "PAUSE".ToUtf8Bytes(); + public readonly static byte[] Role = "ROLE".ToUtf8Bytes(); //public readonly static byte[] Get = "GET".ToUtf8Bytes(); //public readonly static byte[] Set = "SET".ToUtf8Bytes(); @@ -68,14 +82,27 @@ public static class Commands public readonly static byte[] Decr = "DECR".ToUtf8Bytes(); public readonly static byte[] DecrBy = "DECRBY".ToUtf8Bytes(); public readonly static byte[] Append = "APPEND".ToUtf8Bytes(); - public readonly static byte[] Substr = "SUBSTR".ToUtf8Bytes(); public readonly static byte[] GetRange = "GETRANGE".ToUtf8Bytes(); public readonly static byte[] SetRange = "SETRANGE".ToUtf8Bytes(); public readonly static byte[] GetBit = "GETBIT".ToUtf8Bytes(); public readonly static byte[] SetBit = "SETBIT".ToUtf8Bytes(); + public readonly static byte[] BitCount = "BITCOUNT".ToUtf8Bytes(); + + public readonly static byte[] Scan = "SCAN".ToUtf8Bytes(); + public readonly static byte[] SScan = "SSCAN".ToUtf8Bytes(); + public readonly static byte[] HScan = "HSCAN".ToUtf8Bytes(); + public readonly static byte[] ZScan = "ZSCAN".ToUtf8Bytes(); + public readonly static byte[] Match = "MATCH".ToUtf8Bytes(); + public readonly static byte[] Count = "COUNT".ToUtf8Bytes(); + + public readonly static byte[] PfAdd = "PFADD".ToUtf8Bytes(); + public readonly static byte[] PfCount = "PFCOUNT".ToUtf8Bytes(); + public readonly static byte[] PfMerge = "PFMERGE".ToUtf8Bytes(); public readonly static byte[] RPush = "RPUSH".ToUtf8Bytes(); public readonly static byte[] LPush = "LPUSH".ToUtf8Bytes(); + public readonly static byte[] RPushX = "RPUSHX".ToUtf8Bytes(); + public readonly static byte[] LPushX = "LPUSHX".ToUtf8Bytes(); public readonly static byte[] LLen = "LLEN".ToUtf8Bytes(); public readonly static byte[] LRange = "LRANGE".ToUtf8Bytes(); public readonly static byte[] LTrim = "LTRIM".ToUtf8Bytes(); @@ -123,6 +150,9 @@ public static class Commands public readonly static byte[] ZRemRangeByScore = "ZREMRANGEBYSCORE".ToUtf8Bytes(); public readonly static byte[] ZUnionStore = "ZUNIONSTORE".ToUtf8Bytes(); public readonly static byte[] ZInterStore = "ZINTERSTORE".ToUtf8Bytes(); + public static readonly byte[] ZRangeByLex = "ZRANGEBYLEX".ToUtf8Bytes(); + public static readonly byte[] ZLexCount = "ZLEXCOUNT".ToUtf8Bytes(); + public static readonly byte[] ZRemRangeByLex = "ZREMRANGEBYLEX".ToUtf8Bytes(); public readonly static byte[] HSet = "HSET".ToUtf8Bytes(); public readonly static byte[] HSetNx = "HSETNX".ToUtf8Bytes(); @@ -167,7 +197,57 @@ public static class Commands public readonly static byte[] Load = "LOAD".ToUtf8Bytes(); //public readonly static byte[] Exists = "EXISTS".ToUtf8Bytes(); public readonly static byte[] Flush = "FLUSH".ToUtf8Bytes(); - public readonly static byte[] Kill = "KILL".ToUtf8Bytes(); public readonly static byte[] Slowlog = "SLOWLOG".ToUtf8Bytes(); + + public readonly static byte[] Ex = "EX".ToUtf8Bytes(); + public readonly static byte[] Px = "PX".ToUtf8Bytes(); + public readonly static byte[] Nx = "NX".ToUtf8Bytes(); + public readonly static byte[] Xx = "XX".ToUtf8Bytes(); + + // Sentinel commands + public readonly static byte[] Sentinel = "SENTINEL".ToUtf8Bytes(); + public readonly static byte[] Masters = 
"masters".ToUtf8Bytes(); + public readonly static byte[] Sentinels = "sentinels".ToUtf8Bytes(); + public readonly static byte[] Master = "master".ToUtf8Bytes(); + public readonly static byte[] Slaves = "slaves".ToUtf8Bytes(); + public readonly static byte[] Failover = "failover".ToUtf8Bytes(); + public readonly static byte[] GetMasterAddrByName = "get-master-addr-by-name".ToUtf8Bytes(); + + //Geo commands + public readonly static byte[] GeoAdd = "GEOADD".ToUtf8Bytes(); + public readonly static byte[] GeoDist = "GEODIST".ToUtf8Bytes(); + public readonly static byte[] GeoHash = "GEOHASH".ToUtf8Bytes(); + public readonly static byte[] GeoPos = "GEOPOS".ToUtf8Bytes(); + public readonly static byte[] GeoRadius = "GEORADIUS".ToUtf8Bytes(); + public readonly static byte[] GeoRadiusByMember = "GEORADIUSBYMEMBER".ToUtf8Bytes(); + + public readonly static byte[] WithCoord = "WITHCOORD".ToUtf8Bytes(); + public readonly static byte[] WithDist = "WITHDIST".ToUtf8Bytes(); + public readonly static byte[] WithHash = "WITHHASH".ToUtf8Bytes(); + + public readonly static byte[] Meters = RedisGeoUnit.Meters.ToUtf8Bytes(); + public readonly static byte[] Kilometers = RedisGeoUnit.Kilometers.ToUtf8Bytes(); + public readonly static byte[] Miles = RedisGeoUnit.Miles.ToUtf8Bytes(); + public readonly static byte[] Feet = RedisGeoUnit.Feet.ToUtf8Bytes(); + + public static byte[] GetUnit(string unit) + { + if (unit == null) + throw new ArgumentNullException("unit"); + + switch (unit) + { + case RedisGeoUnit.Meters: + return Meters; + case RedisGeoUnit.Kilometers: + return Kilometers; + case RedisGeoUnit.Miles: + return Miles; + case RedisGeoUnit.Feet: + return Feet; + default: + throw new NotSupportedException("Unit '{0}' is not a valid unit".Fmt(unit)); + } + } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/ConnectionUtils.cs b/src/ServiceStack.Redis/ConnectionUtils.cs deleted file mode 100644 index f1731b3e..00000000 --- a/src/ServiceStack.Redis/ConnectionUtils.cs +++ /dev/null @@ -1,449 +0,0 @@ -//using System; -//using System.Collections.Generic; -//using System.Collections.Specialized; -//using System.IO; -//using System.Linq; -//using System.Text; - -//namespace ServiceStack.Redis -//{ -// /// -// /// Provides utility methods for managing connections to multiple (master/slave) redis servers (with the same -// /// information - not sharding). -// /// -// public static class ConnectionUtils -// { -// /// -// /// Inspect the provided configration, and connect to the available servers to report which server is the preferred/active node. -// /// -// public static string SelectConfiguration(string configuration, out string[] availableEndpoints, TextWriter log = null) -// { -// string selected; -// using (SelectAndCreateConnection(configuration, log, out selected, out availableEndpoints, false)) { } -// return selected; -// } -// /// -// /// Inspect the provided configration, and connect to the preferred/active node after checking what nodes are available. -// /// -// public static RedisConnection Connect(string configuration, TextWriter log = null) -// { -// string selectedConfiguration; -// string[] availableEndpoints; -// return SelectAndCreateConnection(configuration, log, out selectedConfiguration, out availableEndpoints, true); -// } - -// /// -// /// Subscribe to perform some operation when a change to the preferred/active node is broadcast. 
-// /// -// public static void SubscribeToMasterSwitch(RedisSubscriberConnection connection, Action handler) -// { -// if (connection == null) throw new ArgumentNullException("connection"); -// if (handler == null) throw new ArgumentNullException("handler"); - -// connection.Subscribe(RedisMasterChangedChannel, (channel, message) => handler(Encoding.UTF8.GetString(message))); -// } -// /// -// /// Using the configuration available, and after checking which nodes are available, switch the master node and broadcast this change. -// /// -// public static void SwitchMaster(string configuration, string newMaster, TextWriter log = null) -// { -// string newConfig; -// string[] availableEndpoints; - -// SelectAndCreateConnection(configuration, log, out newConfig, out availableEndpoints, false, newMaster); -// } - -// const string RedisMasterChangedChannel = "__Booksleeve_MasterChanged", TieBreakerKey = "__Booksleeve_TieBreak"; - -// /// -// /// Prompt all clients to reconnect. -// /// -// public static void BroadcastReconnectMessage(RedisConnection connection) -// { -// if (connection == null) throw new ArgumentNullException("connection"); - -// connection.Wait(connection.Publish(RedisMasterChangedChannel, "*")); -// } -// private static RedisConnection SelectWithTieBreak(TextWriter log, List nodes, Dictionary tiebreakers) -// { -// if (nodes.Count == 0) return null; -// if (nodes.Count == 1) return nodes[0]; -// Func valueOrDefault = key => -// { -// int tmp; -// if (!tiebreakers.TryGetValue(key, out tmp)) tmp = 0; -// return tmp; -// }; -// var tuples = (from node in nodes -// let key = node.Host + ":" + node.Port -// let count = valueOrDefault(key) -// select new { Node = node, Key = key, Count = count }).ToList(); - -// // check for uncontested scenario -// int contenderCount = tuples.Count(x => x.Count > 0); -// switch (contenderCount) -// { -// case 0: -// log.WriteLine("No tie-break contenders; selecting arbitrary node"); -// return tuples[0].Node; -// case 1: -// log.WriteLine("Unaminous tie-break winner"); -// return tuples.Single(x => x.Count > 0).Node; -// } - -// // contested -// int maxCount = tuples.Max(x => x.Count); -// var competing = tuples.Where(x => x.Count == maxCount).ToList(); - -// switch (competing.Count) -// { -// case 0: -// return null; // impossible, but never rely on the impossible not happening ;p -// case 1: -// log.WriteLine("Contested, but clear, tie-break winner"); -// break; -// default: -// log.WriteLine("Contested and ambiguous tie-break; selecting arbitrary node"); -// break; -// } -// return competing[0].Node; -// } - -// private static string[] GetConfigurationOptions(string configuration, out int syncTimeout, out bool allowAdmin) -// { -// syncTimeout = 1000; -// allowAdmin = false; - -// // break it down by commas -// var arr = configuration.Split(','); -// var options = new List(); -// foreach (var option in arr) -// { -// var trimmed = option.Trim(); - -// if (trimmed.IsNullOrWhiteSpace() || options.Contains(trimmed)) continue; - -// // check for special tokens -// int idx = trimmed.IndexOf('='); -// if (idx > 0) -// { -// if (option.StartsWith(SyncTimeoutPrefix)) -// { -// int tmp; -// if (int.TryParse(option.Substring(idx + 1), out tmp)) syncTimeout = tmp; -// continue; -// } -// if (option.StartsWith(AllowAdminPrefix)) -// { -// bool tmp; -// if (bool.TryParse(option.Substring(idx + 1), out tmp)) allowAdmin = tmp; -// continue; -// } -// } - -// options.Add(trimmed); -// } -// return options.ToArray(); -// } - -// internal const string 
AllowAdminPrefix = "allowAdmin=", SyncTimeoutPrefix = "syncTimeout="; -// private static RedisConnection SelectAndCreateConnection(string configuration, TextWriter log, out string selectedConfiguration, out string[] availableEndpoints, bool autoMaster, string newMaster = null) -// { -// int syncTimeout; -// bool allowAdmin; -// if (log == null) log = new StringWriter(); -// var arr = GetConfigurationOptions(configuration, out syncTimeout, out allowAdmin); -// if (!newMaster.IsNullOrWhiteSpace()) allowAdmin = true; // need this to diddle the slave/master config - -// log.WriteLine("{0} unique nodes specified", arr.Length); -// log.WriteLine("sync timeout: {0}ms, admin commands: {1}", syncTimeout, -// allowAdmin ? "enabled" : "disabled"); -// if (arr.Length == 0) -// { -// log.WriteLine("No nodes to consider"); -// selectedConfiguration = null; -// availableEndpoints = new string[0]; -// return null; -// } -// var connections = new List(arr.Length); -// RedisConnection preferred = null; - -// try -// { -// var infos = new List>(arr.Length); -// var tiebreakers = new List>(arr.Length); -// foreach (var option in arr) -// { -// if (option.IsNullOrWhiteSpace()) continue; - -// RedisConnection conn = null; -// try -// { - -// var parts = option.Split(':'); -// if (parts.Length == 0) continue; - -// string host = parts[0].Trim(); -// int port = 6379, tmp; -// if (parts.Length > 1 && int.TryParse(parts[1].Trim(), out tmp)) port = tmp; -// conn = new RedisConnection(host, port, syncTimeout: syncTimeout, allowAdmin: allowAdmin); - -// log.WriteLine("Opening connection to {0}:{1}...", host, port); -// conn.Open(); -// var info = conn.GetInfo(); -// var tiebreak = conn.Strings.GetString(0, TieBreakerKey); -// connections.Add(conn); -// infos.Add(info); -// tiebreakers.Add(tiebreak); -// } -// catch (Exception ex) -// { -// if (conn == null) -// { -// log.WriteLine("Error parsing option \"{0}\": {1}", option, ex.Message); -// } -// else -// { -// log.WriteLine("Error connecting: {0}", ex.Message); -// } -// } -// } -// List masters = new List(), slaves = new List(); -// var breakerScores = new Dictionary(); -// foreach (var tiebreak in tiebreakers) -// { -// try -// { -// if (tiebreak.Wait(syncTimeout)) -// { -// string key = tiebreak.Result; -// if (key.IsNullOrWhiteSpace()) continue; -// int score; -// if (breakerScores.TryGetValue(key, out score)) breakerScores[key] = score + 1; -// else breakerScores.Add(key, 1); -// } -// } -// catch { /* if a node is down, that's fine too */ } -// } -// // check for tie-breakers (i.e. 
when we store which is the master) -// switch (breakerScores.Count) -// { -// case 0: -// log.WriteLine("No tie-breakers found"); -// break; -// case 1: -// log.WriteLine("Tie-breaker is unanimous: {0}", breakerScores.Keys.Single()); -// break; -// default: -// log.WriteLine("Ambiguous tie-breakers:"); -// foreach (var kvp in breakerScores.OrderByDescending(x => x.Value)) -// { -// log.WriteLine("\t{0}: {1}", kvp.Key, kvp.Value); -// } -// break; -// } - -// for (int i = 0; i < connections.Count; i++) -// { -// log.WriteLine("Reading configuration from {0}:{1}...", connections[i].Host, connections[i].Port); -// try -// { -// if (!infos[i].Wait(syncTimeout)) -// { -// log.WriteLine("\tTimeout fetching INFO"); -// continue; -// } -// var infoPairs = new StringDictionary(); -// using (var sr = new StringReader(infos[i].Result)) -// { -// string line; -// while ((line = sr.ReadLine()) != null) -// { -// int idx = line.IndexOf(':'); -// if (idx < 0) continue; -// string key = line.Substring(0, idx).Trim(), -// value = line.Substring(idx + 1, line.Length - (idx + 1)).Trim(); -// infoPairs[key] = value; -// } -// } -// string role = infoPairs["role"]; -// switch (role) -// { -// case "slave": -// log.WriteLine("\tServer is SLAVE of {0}:{1}", -// infoPairs["master_host"], infoPairs["master_port"]); -// log.Write("\tLink is {0}, seen {1} seconds ago", -// infoPairs["master_link_status"], infoPairs["master_last_io_seconds_ago"]); -// if (infoPairs["master_sync_in_progress"] == "1") log.Write(" (sync is in progress)"); -// log.WriteLine(); -// slaves.Add(connections[i]); -// break; -// case "master": -// log.WriteLine("\tServer is MASTER, with {0} slaves", infoPairs["connected_slaves"]); -// masters.Add(connections[i]); -// break; -// default: -// log.WriteLine("\tUnknown role: {0}", role); -// break; -// } -// string tmp = infoPairs["connected_clients"]; -// int clientCount, channelCount, patternCount; -// if (tmp.IsNullOrWhiteSpace() || !int.TryParse(tmp, out clientCount)) clientCount = -1; -// tmp = infoPairs["pubsub_channels"]; -// if (tmp.IsNullOrWhiteSpace(tmp) || !int.TryParse(tmp, out channelCount)) channelCount = -1; -// tmp = infoPairs["pubsub_patterns"]; -// if (tmp.IsNullOrWhiteSpace(tmp) || !int.TryParse(tmp, out patternCount)) patternCount = -1; -// log.WriteLine("\tClients: {0}; channels: {1}; patterns: {2}", clientCount, channelCount, patternCount); -// } -// catch (Exception ex) -// { -// log.WriteLine("\tError reading INFO results: {0}", ex.Message); -// } -// } - -// if (newMaster == null) -// { -// switch (masters.Count) -// { -// case 0: -// switch (slaves.Count) -// { -// case 0: -// log.WriteLine("No masters or slaves found"); -// break; -// case 1: -// log.WriteLine("No masters found; selecting single slave"); -// preferred = slaves[0]; -// break; -// default: -// log.WriteLine("No masters found; considering {0} slaves...", slaves.Count); -// preferred = SelectWithTieBreak(log, slaves, breakerScores); -// break; -// } -// if (preferred != null) -// { -// if (autoMaster) -// { -// //LogException("Promoting redis SLAVE to MASTER"); -// log.WriteLine("Promoting slave to master..."); -// if (allowAdmin) -// { // can do on this connection -// preferred.Wait(preferred.Server.MakeMaster()); -// } -// else -// { // need an admin connection for this -// using (var adminPreferred = new RedisConnection(preferred.Host, preferred.Port, allowAdmin: true, syncTimeout: syncTimeout)) -// { -// adminPreferred.Open(); -// adminPreferred.Wait(adminPreferred.Server.MakeMaster()); -// } -// } -// } 
-// else -// { -// log.WriteLine("Slave should be promoted to master (but not done yet)..."); -// } -// } -// break; -// case 1: -// log.WriteLine("One master found; selecting"); -// preferred = masters[0]; -// break; -// default: -// log.WriteLine("Considering {0} masters...", masters.Count); -// preferred = SelectWithTieBreak(log, masters, breakerScores); -// break; -// } - - -// } -// else -// { // we have been instructed to change master server -// preferred = masters.Concat(slaves).FirstOrDefault(conn => (conn.Host + ":" + conn.Port) == newMaster); -// if (preferred == null) -// { -// log.WriteLine("Selected new master not available: {0}", newMaster); -// } -// else -// { -// int errorCount = 0; -// try -// { -// log.WriteLine("Promoting to master: {0}:{1}...", preferred.Host, preferred.Port); -// preferred.Wait(preferred.Server.MakeMaster()); -// preferred.Strings.Set(0, TieBreakerKey, newMaster); -// preferred.Wait(preferred.Publish(RedisMasterChangedChannel, newMaster)); -// } -// catch (Exception ex) -// { -// log.WriteLine("\t{0}", ex.Message); -// errorCount++; -// } - -// if (errorCount == 0) // only make slaves if the master was happy -// { -// foreach (var conn in masters.Concat(slaves)) -// { -// if (conn == preferred) continue; // can't make self a slave! - -// try -// { -// log.WriteLine("Enslaving: {0}:{1}...", conn.Host, conn.Port); -// // set the tie-breaker **first** in case of problems -// conn.Strings.Set(0, TieBreakerKey, newMaster); -// // and broadcast to anyone who thinks this is the master -// conn.Publish(RedisMasterChangedChannel, newMaster); -// // now make it a slave -// conn.Wait(conn.Server.MakeSlave(preferred.Host, preferred.Port)); -// } -// catch (Exception ex) -// { -// log.WriteLine("\t{0}", ex.Message); -// errorCount++; -// } -// } -// } -// if (errorCount != 0) -// { -// log.WriteLine("Things didn't go smoothly; CHECK WHAT HAPPENED!"); -// } - -// // want the connection disposed etc -// preferred = null; -// } -// } - -// if (preferred == null) -// { -// selectedConfiguration = null; -// } -// else -// { -// selectedConfiguration = preferred.Host + ":" + preferred.Port; -// log.WriteLine("Selected server {0}", selectedConfiguration); -// } - -// availableEndpoints = (from conn in masters.Concat(slaves) -// select conn.Host + ":" + conn.Port).ToArray(); -// return preferred; -// } -// finally -// { -// foreach (var conn in connections) -// { -// if (conn != null && conn != preferred) try { conn.Dispose(); } -// catch { } -// } -// } -// } - -// } - -// public static class ConnectionUtilsExtensions -// { -// public static bool IsNullOrWhiteSpace(this string str) -// { -// return str == null || str.Trim().Length == 0; -// } -// } -//} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.Async.cs b/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.Async.cs new file mode 100644 index 00000000..0c75d760 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.Async.cs @@ -0,0 +1,110 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Internal; +using ServiceStack.Redis.Pipeline; + +namespace ServiceStack.Redis.Generic +{ + /// + /// A complete redis command, with method to send command, receive response, and run callback on success or failure + /// + internal partial class QueuedRedisTypedCommand : QueuedRedisOperation + { + private Delegate _asyncReturnCommand; + partial void 
OnExecuteThrowIfAsync() + { + if (_asyncReturnCommand is object) + { + throw new InvalidOperationException("An async return command was present, but the queued operation is being processed synchronously"); + } + } + private QueuedRedisTypedCommand SetAsyncReturnCommand(Delegate value) + { + if (_asyncReturnCommand is object && _asyncReturnCommand != value) + throw new InvalidOperationException("Only a single async return command can be assigned"); + _asyncReturnCommand = value; + return this; + } + + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> VoidReturnCommandAsync) + => SetAsyncReturnCommand(VoidReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> IntReturnCommandAsync) + => SetAsyncReturnCommand(IntReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> LongReturnCommandAsync) + => SetAsyncReturnCommand(LongReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> BoolReturnCommandAsync) + => SetAsyncReturnCommand(BoolReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> BytesReturnCommandAsync) + => SetAsyncReturnCommand(BytesReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> MultiBytesReturnCommandAsync) + => SetAsyncReturnCommand(MultiBytesReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> StringReturnCommandAsync) + => SetAsyncReturnCommand(StringReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask>> MultiStringReturnCommandAsync) + => SetAsyncReturnCommand(MultiStringReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> DoubleReturnCommandAsync) + => SetAsyncReturnCommand(DoubleReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask>> MultiObjectReturnCommandAsync) + => SetAsyncReturnCommand(MultiObjectReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> ObjectReturnCommandAsync) + => SetAsyncReturnCommand(ObjectReturnCommandAsync); + + public ValueTask ExecuteAsync(IRedisTypedClientAsync client) + { + try + { + switch (_asyncReturnCommand) + { + case null: + ExecuteThrowIfSync(); + return default; + case Func, ValueTask> VoidReturnCommandAsync: + return VoidReturnCommandAsync(client); + case Func, ValueTask> IntReturnCommandAsync: + return IntReturnCommandAsync(client).Await(); + case Func, ValueTask> LongReturnCommandAsync: + return LongReturnCommandAsync(client).Await(); + case Func, ValueTask> DoubleReturnCommandAsync: + return DoubleReturnCommandAsync(client).Await(); + case Func, ValueTask> BytesReturnCommandAsync: + return BytesReturnCommandAsync(client).Await(); + case Func, ValueTask> StringReturnCommandAsync: + return StringReturnCommandAsync(client).Await(); + case Func, ValueTask> MultiBytesReturnCommandAsync: + return MultiBytesReturnCommandAsync(client).Await(); + case Func, ValueTask>> MultiStringReturnCommandAsync: + return MultiStringReturnCommandAsync(client).Await(); + case object obj: + ExecuteThrowIfSync(); + return default; + } + } + catch (Exception ex) + { + Log.Error(ex); + return default; // non-async version swallows + } + } + + protected void ExecuteThrowIfSync() + { + if (VoidReturnCommand is object + || IntReturnCommand is object + || LongReturnCommand is object + || BoolReturnCommand is object + || 
BytesReturnCommand is object + || MultiBytesReturnCommand is object + || StringReturnCommand is object + || MultiStringReturnCommand is object + || DoubleReturnCommand is object + || MultiObjectReturnCommand is object + || ObjectReturnCommand is object) + { + throw new InvalidOperationException("A sync return command was present, but the queued operation is being processed asynchronously"); + } + } + + } +} diff --git a/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.cs b/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.cs index ca8b2870..fe9c8b0b 100644 --- a/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.cs +++ b/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.cs @@ -1,14 +1,14 @@ -using System; +using ServiceStack.Redis.Pipeline; +using System; using System.Collections.Generic; -using ServiceStack.Redis.Generic; -using ServiceStack.Redis.Pipeline; +using System.Threading.Tasks; namespace ServiceStack.Redis.Generic { /// /// A complete redis command, with method to send command, receive response, and run callback on success or failure /// - internal class QueuedRedisTypedCommand : QueuedRedisOperation + internal partial class QueuedRedisTypedCommand : QueuedRedisOperation { public Action> VoidReturnCommand { get; set; } @@ -74,5 +74,7 @@ public void Execute(IRedisTypedClient client) } } + private void ExecuteThrowIfAsync() => OnExecuteThrowIfAsync(); + partial void OnExecuteThrowIfAsync(); } } diff --git a/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.Async.cs b/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.Async.cs new file mode 100644 index 00000000..86de9a27 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.Async.cs @@ -0,0 +1,55 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
+// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + internal partial class RedisClientHash + : IRedisHashAsync + { + IRedisTypedClientAsync AsyncClient => client; + + ValueTask IRedisHashAsync.AddAsync(KeyValuePair item, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(this, item.Key, item.Value, token).Await(); + + ValueTask IRedisHashAsync.AddAsync(TKey key, T value, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(this, key, value, token).Await(); + + ValueTask IRedisHashAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveEntryAsync(new[] { this }, token).Await(); + + ValueTask IRedisHashAsync.ContainsKeyAsync(TKey key, CancellationToken token) + => AsyncClient.HashContainsEntryAsync(this, key, token); + + ValueTask IRedisHashAsync.CountAsync(CancellationToken token) + => AsyncClient.GetHashCountAsync(this, token).AsInt32(); + + ValueTask> IRedisHashAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllEntriesFromHashAsync(this, token); + + async IAsyncEnumerator> IAsyncEnumerable>.GetAsyncEnumerator(CancellationToken token) + { + var all = await AsyncClient.GetAllEntriesFromHashAsync(this, token).ConfigureAwait(false); + foreach (var pair in all) + { + yield return pair; + } + } + + ValueTask IRedisHashAsync.RemoveAsync(TKey key, CancellationToken token) + => AsyncClient.RemoveEntryFromHashAsync(this, key, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs b/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs index a8c1d560..20fd91bc 100644 --- a/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs +++ b/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -16,134 +16,134 @@ namespace ServiceStack.Redis.Generic { - /// - /// Wrap the common redis set operations under a ICollection[string] interface. 
- /// - internal class RedisClientHash - : IRedisHash - { - private readonly RedisTypedClient client; - private readonly string hashId; - - public RedisClientHash(RedisTypedClient client, string hashId) - { - this.client = client; - this.hashId = hashId; - } - - public string Id - { - get { return this.hashId; } - } - - public IEnumerator> GetEnumerator() - { - return client.GetAllEntriesFromHash(this).GetEnumerator(); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public Dictionary GetAll() - { - return client.GetAllEntriesFromHash(this); - } - - public void Add(KeyValuePair item) - { - client.SetEntryInHash(this, item.Key, item.Value); - } - - public void Clear() - { - client.RemoveEntry(this); - } - - public bool Contains(KeyValuePair item) - { - var value = client.GetValueFromHash(this, item.Key); - return !Equals(value, default(T)) && Equals(value, item.Value); - } - - public void CopyTo(KeyValuePair[] array, int arrayIndex) - { - var allItemsInHash = client.GetAllEntriesFromHash(this); - - var i = arrayIndex; - foreach (var entry in allItemsInHash) - { - if (i >= array.Length) return; - array[i] = entry; - } - } - - public bool Remove(KeyValuePair item) - { - return Contains(item) && client.RemoveEntryFromHash(this, item.Key); - } - - public int Count - { - get { return client.GetHashCount(this); } - } - - public bool IsReadOnly - { - get { return false; } - } - - public bool ContainsKey(TKey key) - { - return client.HashContainsEntry(this, key); - } - - public void Add(TKey key, T value) - { - client.SetEntryInHash(this, key, value); - } - - public bool Remove(TKey key) - { - return client.RemoveEntryFromHash(this, key); - } - - public bool TryGetValue(TKey key, out T value) - { - if (ContainsKey(key)) - { - value = client.GetValueFromHash(this, key); - return true; - } - value = default(T); - return false; - } - - public T this[TKey key] - { - get { return client.GetValueFromHash(this, key); } - set { client.SetEntryInHash(this, key, value); } - } - - public ICollection Keys - { - get { return client.GetHashKeys(this); } - } - - public ICollection Values - { - get { return client.GetHashValues(this); } - } - - public List GetAllKeys() - { - return client.GetHashKeys(this); - } - - public List GetAllValues() - { - return client.GetHashValues(this); - } - } + /// + /// Wrap the common redis set operations under a ICollection[string] interface. 
+ /// + internal partial class RedisClientHash + : IRedisHash + { + private readonly RedisTypedClient client; + private readonly string hashId; + + public RedisClientHash(RedisTypedClient client, string hashId) + { + this.client = client; + this.hashId = hashId; + } + + public string Id + { + get { return this.hashId; } + } + + public IEnumerator> GetEnumerator() + { + return client.GetAllEntriesFromHash(this).GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public Dictionary GetAll() + { + return client.GetAllEntriesFromHash(this); + } + + public void Add(KeyValuePair item) + { + client.SetEntryInHash(this, item.Key, item.Value); + } + + public void Clear() + { + client.RemoveEntry(this); + } + + public bool Contains(KeyValuePair item) + { + var value = client.GetValueFromHash(this, item.Key); + return !Equals(value, default(T)) && Equals(value, item.Value); + } + + public void CopyTo(KeyValuePair[] array, int arrayIndex) + { + var allItemsInHash = client.GetAllEntriesFromHash(this); + + var i = arrayIndex; + foreach (var entry in allItemsInHash) + { + if (i >= array.Length) return; + array[i] = entry; + } + } + + public bool Remove(KeyValuePair item) + { + return Contains(item) && client.RemoveEntryFromHash(this, item.Key); + } + + public int Count + { + get { return (int)client.GetHashCount(this); } + } + + public bool IsReadOnly + { + get { return false; } + } + + public bool ContainsKey(TKey key) + { + return client.HashContainsEntry(this, key); + } + + public void Add(TKey key, T value) + { + client.SetEntryInHash(this, key, value); + } + + public bool Remove(TKey key) + { + return client.RemoveEntryFromHash(this, key); + } + + public bool TryGetValue(TKey key, out T value) + { + if (ContainsKey(key)) + { + value = client.GetValueFromHash(this, key); + return true; + } + value = default(T); + return false; + } + + public T this[TKey key] + { + get { return client.GetValueFromHash(this, key); } + set { client.SetEntryInHash(this, key, value); } + } + + public ICollection Keys + { + get { return client.GetHashKeys(this); } + } + + public ICollection Values + { + get { return client.GetHashValues(this); } + } + + public List GetAllKeys() + { + return client.GetHashKeys(this); + } + + public List GetAllValues() + { + return client.GetHashValues(this); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientList.Generic.Async.cs b/src/ServiceStack.Redis/Generic/RedisClientList.Generic.Async.cs new file mode 100644 index 00000000..66b3e596 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisClientList.Generic.Async.cs @@ -0,0 +1,184 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
+// + +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + internal partial class RedisClientList + : IRedisListAsync + { + IRedisTypedClientAsync AsyncClient => client; + IRedisListAsync AsAsync() => this; + + async ValueTask IRedisListAsync.AddRangeAsync(IEnumerable values, CancellationToken token) + { + //TODO: replace it with a pipeline implementation ala AddRangeToSet + foreach (var value in values) + { + await AsyncClient.AddItemToListAsync(this, value, token).ConfigureAwait(false); + } + } + + ValueTask IRedisListAsync.AppendAsync(T value, CancellationToken token) + => AsyncClient.AddItemToListAsync(this, value, token); + + ValueTask IRedisListAsync.BlockingDequeueAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingDequeueItemFromListAsync(this, timeOut, token); + + ValueTask IRedisListAsync.BlockingPopAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingPopItemFromListAsync(this, timeOut, token); + + ValueTask IRedisListAsync.BlockingRemoveStartAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingRemoveStartFromListAsync(this, timeOut, token); + + ValueTask IRedisListAsync.CountAsync(CancellationToken token) + => AsyncClient.GetListCountAsync(this, token).AsInt32(); + + ValueTask IRedisListAsync.DequeueAsync(CancellationToken token) + => AsyncClient.DequeueItemFromListAsync(this, token); + + ValueTask IRedisListAsync.EnqueueAsync(T value, CancellationToken token) + => AsyncClient.EnqueueItemOnListAsync(this, value, token); + + ValueTask> IRedisListAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromListAsync(this, token); + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + var count = await AsAsync().CountAsync(token).ConfigureAwait(false); + if (count <= PageLimit) + { + var all = await AsyncClient.GetAllItemsFromListAsync(this, token).ConfigureAwait(false); + foreach (var item in all) + { + yield return item; + } + } + else + { + // from GetPagingEnumerator() + var skip = 0; + List pageResults; + do + { + pageResults = await AsyncClient.GetRangeFromListAsync(this, skip, PageLimit, token).ConfigureAwait(false); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + } + + ValueTask> IRedisListAsync.GetRangeAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetRangeFromListAsync(this, startingFrom, endingAt, token); + + ValueTask> IRedisListAsync.GetRangeFromSortedListAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.SortListAsync(this, startingFrom, endingAt, token); + + ValueTask IRedisListAsync.PopAndPushAsync(IRedisListAsync toList, CancellationToken token) + => AsyncClient.PopAndPushItemBetweenListsAsync(this, toList, token); + + ValueTask IRedisListAsync.PopAsync(CancellationToken token) + => AsyncClient.PopItemFromListAsync(this, token); + + ValueTask IRedisListAsync.PrependAsync(T value, CancellationToken token) + => AsyncClient.PrependItemToListAsync(this, value, token); + + ValueTask IRedisListAsync.PushAsync(T value, CancellationToken token) + => AsyncClient.PushItemToListAsync(this, value, token); + + ValueTask IRedisListAsync.RemoveAllAsync(CancellationToken token) + => AsyncClient.RemoveAllFromListAsync(this, token); + + ValueTask 
IRedisListAsync.RemoveEndAsync(CancellationToken token) + => AsyncClient.RemoveEndFromListAsync(this, token); + + ValueTask IRedisListAsync.RemoveStartAsync(CancellationToken token) + => AsyncClient.RemoveStartFromListAsync(this, token); + + ValueTask IRedisListAsync.RemoveValueAsync(T value, CancellationToken token) + => AsyncClient.RemoveItemFromListAsync(this, value, token); + + ValueTask IRedisListAsync.RemoveValueAsync(T value, int noOfMatches, CancellationToken token) + => AsyncClient.RemoveItemFromListAsync(this, value, noOfMatches, token); + + ValueTask IRedisListAsync.TrimAsync(int keepStartingFrom, int keepEndingAt, CancellationToken token) + => AsyncClient.TrimListAsync(this, keepStartingFrom, keepEndingAt, token); + + async ValueTask IRedisListAsync.RemoveAsync(T value, CancellationToken token) + { + var index = await AsAsync().IndexOfAsync(value, token).ConfigureAwait(false); + if (index != -1) + { + await AsAsync().RemoveAtAsync(index, token).ConfigureAwait(false); + return true; + } + return false; + } + + ValueTask IRedisListAsync.AddAsync(T value, CancellationToken token) + => AsyncClient.AddItemToListAsync(this, value, token); + + async ValueTask IRedisListAsync.RemoveAtAsync(int index, CancellationToken token) + { + //TODO: replace with native implementation when one exists + + var nativeClient = client.NativeClient as IRedisNativeClientAsync ?? throw new NotSupportedException( + $"The native client ('{client.NativeClient.GetType().Name}') does not implement {nameof(IRedisNativeClientAsync)}"); + + var markForDelete = Guid.NewGuid().ToString(); + await nativeClient.LSetAsync(listId, index, Encoding.UTF8.GetBytes(markForDelete), token).ConfigureAwait(false); + + const int removeAll = 0; + await nativeClient.LRemAsync(listId, removeAll, Encoding.UTF8.GetBytes(markForDelete), token).ConfigureAwait(false); + } + + async ValueTask IRedisListAsync.ContainsAsync(T value, CancellationToken token) + { + //TODO: replace with native implementation when exists + await foreach (var existingItem in this.ConfigureAwait(false).WithCancellation(token)) + { + if (Equals(existingItem, value)) return true; + } + return false; + } + + ValueTask IRedisListAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveAllFromListAsync(this, token); + + async ValueTask IRedisListAsync.IndexOfAsync(T value, CancellationToken token) + { + //TODO: replace with native implementation when exists + var i = 0; + await foreach (var existingItem in this.ConfigureAwait(false).WithCancellation(token)) + { + if (Equals(existingItem, value)) return i; + i++; + } + return -1; + } + + ValueTask IRedisListAsync.ElementAtAsync(int index, CancellationToken token) + => AsyncClient.GetItemFromListAsync(this, index, token); + + ValueTask IRedisListAsync.SetValueAsync(int index, T value, CancellationToken token) + => AsyncClient.SetItemInListAsync(this, index, value, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientList.Generic.cs b/src/ServiceStack.Redis/Generic/RedisClientList.Generic.cs index 208470f7..61cd0049 100644 --- a/src/ServiceStack.Redis/Generic/RedisClientList.Generic.cs +++ b/src/ServiceStack.Redis/Generic/RedisClientList.Generic.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. 
+// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -17,79 +17,79 @@ namespace ServiceStack.Redis.Generic { - internal class RedisClientList - : IRedisList - { - private readonly RedisTypedClient client; - private readonly string listId; - private const int PageLimit = 1000; - - public RedisClientList(RedisTypedClient client, string listId) - { - this.listId = listId; - this.client = client; - } - - public string Id - { - get { return listId; } - } - - public IEnumerator GetEnumerator() - { - return this.Count <= PageLimit - ? client.GetAllItemsFromList(this).GetEnumerator() - : GetPagingEnumerator(); - } - - public IEnumerator GetPagingEnumerator() - { - var skip = 0; - List pageResults; - do - { - pageResults = client.GetRangeFromList(this, skip, PageLimit); - foreach (var result in pageResults) - { - yield return result; - } - skip += PageLimit; - } while (pageResults.Count == PageLimit); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(T item) - { - client.AddItemToList(this, item); - } - - public void Clear() - { - client.RemoveAllFromList(this); - } - - public bool Contains(T item) - { - //TODO: replace with native implementation when exists - foreach (var existingItem in this) - { - if (Equals(existingItem, item)) return true; - } - return false; - } - - public void CopyTo(T[] array, int arrayIndex) - { - var allItemsInList = client.GetAllItemsFromList(this); - allItemsInList.CopyTo(array, arrayIndex); - } - - public bool Remove(T item) - { + internal partial class RedisClientList + : IRedisList + { + private readonly RedisTypedClient client; + private readonly string listId; + private const int PageLimit = 1000; + + public RedisClientList(RedisTypedClient client, string listId) + { + this.listId = listId; + this.client = client; + } + + public string Id + { + get { return listId; } + } + + public IEnumerator GetEnumerator() + { + return this.Count <= PageLimit + ? 
client.GetAllItemsFromList(this).GetEnumerator() + : GetPagingEnumerator(); + } + + public IEnumerator GetPagingEnumerator() + { + var skip = 0; + List pageResults; + do + { + pageResults = client.GetRangeFromList(this, skip, PageLimit); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public void Add(T item) + { + client.AddItemToList(this, item); + } + + public void Clear() + { + client.RemoveAllFromList(this); + } + + public bool Contains(T item) + { + //TODO: replace with native implementation when exists + foreach (var existingItem in this) + { + if (Equals(existingItem, item)) return true; + } + return false; + } + + public void CopyTo(T[] array, int arrayIndex) + { + var allItemsInList = client.GetAllItemsFromList(this); + allItemsInList.CopyTo(array, arrayIndex); + } + + public bool Remove(T item) + { var index = this.IndexOf(item); if (index != -1) { @@ -97,154 +97,154 @@ public bool Remove(T item) return true; } return false; - } - - public int Count - { - get - { - return client.GetListCount(this); - } - } - - public bool IsReadOnly { get { return false; } } - - public int IndexOf(T item) - { - //TODO: replace with native implementation when exists - var i = 0; - foreach (var existingItem in this) - { - if (Equals(existingItem, item)) return i; - i++; - } - return -1; - } - - public void Insert(int index, T item) - { + } + + public int Count + { + get + { + return (int)client.GetListCount(this); + } + } + + public bool IsReadOnly { get { return false; } } + + public int IndexOf(T item) + { + //TODO: replace with native implementation when exists + var i = 0; + foreach (var existingItem in this) + { + if (Equals(existingItem, item)) return i; + i++; + } + return -1; + } + + public void Insert(int index, T item) + { client.InsertAfterItemInList(this, this[index], item); - } - - public void RemoveAt(int index) - { - //TODO: replace with native implementation when one exists - var markForDelete = Guid.NewGuid().ToString(); - client.NativeClient.LSet(listId, index, Encoding.UTF8.GetBytes(markForDelete)); - - const int removeAll = 0; - client.NativeClient.LRem(listId, removeAll, Encoding.UTF8.GetBytes(markForDelete)); - } - - public T this[int index] - { - get { return client.GetItemFromList(this, index); } - set { client.SetItemInList(this, index, value); } - } - - public List GetAll() - { - return client.GetAllItemsFromList(this); - } - - public List GetRange(int startingFrom, int endingAt) - { - return client.GetRangeFromList(this, startingFrom, endingAt); - } - - public List GetRangeFromSortedList(int startingFrom, int endingAt) - { - return client.SortList(this, startingFrom, endingAt); - } - - public void RemoveAll() - { - client.RemoveAllFromList(this); - } - - public void Trim(int keepStartingFrom, int keepEndingAt) - { - client.TrimList(this, keepStartingFrom, keepEndingAt); - } - - public int RemoveValue(T value) - { - return client.RemoveItemFromList(this, value); - } - - public int RemoveValue(T value, int noOfMatches) - { - return client.RemoveItemFromList(this, value, noOfMatches); - } - - public void AddRange(IEnumerable values) - { - client.AddRangeToList(this, values); - } - - public void Append(T value) - { - Add(value); - } - - public void Prepend(T value) - { - client.PrependItemToList(this, value); - } - - public T RemoveStart() - { - return client.RemoveStartFromList(this); - } - - public T 
BlockingRemoveStart(TimeSpan? timeOut) - { - return client.BlockingRemoveStartFromList(this, timeOut); - } - - public T RemoveEnd() - { - return client.RemoveEndFromList(this); - } - - public void Enqueue(T value) - { - client.EnqueueItemOnList(this, value); - } - - public T Dequeue() - { - return client.DequeueItemFromList(this); - } - - public T BlockingDequeue(TimeSpan? timeOut) - { - return client.BlockingDequeueItemFromList(this, timeOut); - } - - public void Push(T value) - { - client.PushItemToList(this, value); - } - - public T Pop() - { - return client.PopItemFromList(this); - } - - public T BlockingPop(TimeSpan? timeOut) - { - return client.BlockingPopItemFromList(this, timeOut); - } - - public T PopAndPush(IRedisList toList) - { - return client.PopAndPushItemBetweenLists(this, toList); - } - - public T BlockingPopAndPush(IRedisList toList, TimeSpan? timeOut) - { - return client.BlockingPopAndPushItemBetweenLists(this, toList, timeOut); - } - } + } + + public void RemoveAt(int index) + { + //TODO: replace with native implementation when one exists + var markForDelete = Guid.NewGuid().ToString(); + client.NativeClient.LSet(listId, index, Encoding.UTF8.GetBytes(markForDelete)); + + const int removeAll = 0; + client.NativeClient.LRem(listId, removeAll, Encoding.UTF8.GetBytes(markForDelete)); + } + + public T this[int index] + { + get { return client.GetItemFromList(this, index); } + set { client.SetItemInList(this, index, value); } + } + + public List GetAll() + { + return client.GetAllItemsFromList(this); + } + + public List GetRange(int startingFrom, int endingAt) + { + return client.GetRangeFromList(this, startingFrom, endingAt); + } + + public List GetRangeFromSortedList(int startingFrom, int endingAt) + { + return client.SortList(this, startingFrom, endingAt); + } + + public void RemoveAll() + { + client.RemoveAllFromList(this); + } + + public void Trim(int keepStartingFrom, int keepEndingAt) + { + client.TrimList(this, keepStartingFrom, keepEndingAt); + } + + public long RemoveValue(T value) + { + return client.RemoveItemFromList(this, value); + } + + public long RemoveValue(T value, int noOfMatches) + { + return client.RemoveItemFromList(this, value, noOfMatches); + } + + public void AddRange(IEnumerable values) + { + client.AddRangeToList(this, values); + } + + public void Append(T value) + { + Add(value); + } + + public void Prepend(T value) + { + client.PrependItemToList(this, value); + } + + public T RemoveStart() + { + return client.RemoveStartFromList(this); + } + + public T BlockingRemoveStart(TimeSpan? timeOut) + { + return client.BlockingRemoveStartFromList(this, timeOut); + } + + public T RemoveEnd() + { + return client.RemoveEndFromList(this); + } + + public void Enqueue(T value) + { + client.EnqueueItemOnList(this, value); + } + + public T Dequeue() + { + return client.DequeueItemFromList(this); + } + + public T BlockingDequeue(TimeSpan? timeOut) + { + return client.BlockingDequeueItemFromList(this, timeOut); + } + + public void Push(T value) + { + client.PushItemToList(this, value); + } + + public T Pop() + { + return client.PopItemFromList(this); + } + + public T BlockingPop(TimeSpan? timeOut) + { + return client.BlockingPopItemFromList(this, timeOut); + } + + public T PopAndPush(IRedisList toList) + { + return client.PopAndPushItemBetweenLists(this, toList); + } + + public T BlockingPopAndPush(IRedisList toList, TimeSpan? 
timeOut) + { + return client.BlockingPopAndPushItemBetweenLists(this, toList, timeOut); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.Async.cs b/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.Async.cs new file mode 100644 index 00000000..0d0ba19b --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.Async.cs @@ -0,0 +1,109 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + internal partial class RedisClientSet + : IRedisSetAsync + { + IRedisTypedClientAsync AsyncClient => client; + + ValueTask IRedisSetAsync.AddAsync(T value, CancellationToken token) + => AsyncClient.AddItemToSetAsync(this, value, token); + + IRedisSetAsync AsAsync() => this; + + ValueTask IRedisSetAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveEntryAsync(setId, token).Await(); + + ValueTask IRedisSetAsync.ContainsAsync(T item, CancellationToken token) + => AsyncClient.SetContainsItemAsync(this, item, token); + + ValueTask IRedisSetAsync.CountAsync(CancellationToken token) + => AsyncClient.GetSetCountAsync(this, token).AsInt32(); + + ValueTask> IRedisSetAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSetAsync(this, token); + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + var count = await AsAsync().CountAsync(token).ConfigureAwait(false); + if (count <= PageLimit) + { + var all = await AsyncClient.GetAllItemsFromSetAsync(this, token).ConfigureAwait(false); + foreach (var item in all) + { + yield return item; + } + } + else + { + // from GetPagingEnumerator + var skip = 0; + List pageResults; + do + { + pageResults = await AsyncClient.GetSortedEntryValuesAsync(this, skip, skip + PageLimit - 1, token).ConfigureAwait(false); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + } + + ValueTask IRedisSetAsync.GetDifferencesAsync(IRedisSetAsync[] withSets, CancellationToken token) + => AsyncClient.StoreUnionFromSetsAsync(this, withSets, token); + + ValueTask IRedisSetAsync.GetDifferencesAsync(params IRedisSetAsync[] withSets) + => AsAsync().GetDifferencesAsync(withSets, token: default); + + ValueTask IRedisSetAsync.GetRandomItemAsync(CancellationToken token) + => AsyncClient.GetRandomItemFromSetAsync(this, token); + + ValueTask IRedisSetAsync.MoveToAsync(T item, IRedisSetAsync toSet, CancellationToken token) + => AsyncClient.MoveBetweenSetsAsync(this, toSet, item, token); + + ValueTask IRedisSetAsync.PopRandomItemAsync(CancellationToken token) + => AsyncClient.PopItemFromSetAsync(this, token); + + ValueTask IRedisSetAsync.PopulateWithDifferencesOfAsync(IRedisSetAsync fromSet, IRedisSetAsync[] withSets, CancellationToken token) + => AsyncClient.StoreDifferencesFromSetAsync(this, fromSet, withSets, token); + + ValueTask IRedisSetAsync.PopulateWithDifferencesOfAsync(IRedisSetAsync fromSet, params IRedisSetAsync[] withSets) + => AsAsync().PopulateWithDifferencesOfAsync(fromSet, withSets, token: default); + + ValueTask 
IRedisSetAsync.PopulateWithIntersectOfAsync(IRedisSetAsync[] sets, CancellationToken token) + => AsyncClient.StoreIntersectFromSetsAsync(this, sets, token); + + ValueTask IRedisSetAsync.PopulateWithIntersectOfAsync(params IRedisSetAsync[] sets) + => AsAsync().PopulateWithIntersectOfAsync(sets, token: default); + + ValueTask IRedisSetAsync.PopulateWithUnionOfAsync(IRedisSetAsync[] sets, CancellationToken token) + => AsyncClient.StoreUnionFromSetsAsync(this, sets, token); + + ValueTask IRedisSetAsync.PopulateWithUnionOfAsync(params IRedisSetAsync[] sets) + => AsAsync().PopulateWithUnionOfAsync(sets, token: default); + + ValueTask IRedisSetAsync.RemoveAsync(T value, CancellationToken token) + => AsyncClient.RemoveItemFromSetAsync(this, value, token).AwaitAsTrue(); // see Remove for why "true" + + ValueTask> IRedisSetAsync.SortAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetSortedEntryValuesAsync(this, startingFrom, endingAt, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs b/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs index b593a4ce..5c48dd6a 100644 --- a/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs +++ b/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -16,135 +16,135 @@ namespace ServiceStack.Redis.Generic { - /// - /// Wrap the common redis set operations under a ICollection[string] interface. - /// - internal class RedisClientSet - : IRedisSet - { - private readonly RedisTypedClient client; - private readonly string setId; - private const int PageLimit = 1000; - - public RedisClientSet(RedisTypedClient client, string setId) - { - this.client = client; - this.setId = setId; - } - - public string Id - { - get { return this.setId; } - } - - public IEnumerator GetEnumerator() - { - return this.Count <= PageLimit - ? 
client.GetAllItemsFromSet(this).GetEnumerator() - : GetPagingEnumerator(); - } - - public IEnumerator GetPagingEnumerator() - { - var skip = 0; - List pageResults; - do - { - pageResults = client.GetSortedEntryValues(this, skip, skip + PageLimit - 1); - foreach (var result in pageResults) - { - yield return result; - } - skip += PageLimit; - } while (pageResults.Count == PageLimit); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(T item) - { - client.AddItemToSet(this, item); - } - - public void Clear() - { - client.RemoveEntry(setId); - } - - public bool Contains(T item) - { - return client.SetContainsItem(this, item); - } - - public void CopyTo(T[] array, int arrayIndex) - { - var allItemsInSet = client.GetAllItemsFromSet(this); - allItemsInSet.CopyTo(array, arrayIndex); - } - - public bool Remove(T item) - { - client.RemoveItemFromSet(this, item); - return true; - } - - public int Count - { - get - { - var setCount = client.GetSetCount(this); - return setCount; - } - } - - public bool IsReadOnly { get { return false; } } - - public List Sort(int startingFrom, int endingAt) - { - return client.GetSortedEntryValues(this, startingFrom, endingAt); - } - - public HashSet GetAll() - { - return client.GetAllItemsFromSet(this); - } - - public T PopRandomItem() - { - return client.PopItemFromSet(this); - } - - public T GetRandomItem() - { - return client.GetRandomItemFromSet(this); - } - - public void MoveTo(T item, IRedisSet toSet) - { - client.MoveBetweenSets(this, toSet, item); - } - - public void PopulateWithIntersectOf(params IRedisSet[] sets) - { - client.StoreIntersectFromSets(this, sets); - } - - public void PopulateWithUnionOf(params IRedisSet[] sets) - { - client.StoreUnionFromSets(this, sets); - } - - public void GetDifferences(params IRedisSet[] withSets) - { - client.StoreUnionFromSets(this, withSets); - } - - public void PopulateWithDifferencesOf(IRedisSet fromSet, params IRedisSet[] withSets) - { - client.StoreDifferencesFromSet(this, fromSet, withSets); - } - } + /// + /// Wrap the common redis set operations under a ICollection[string] interface. + /// + internal partial class RedisClientSet + : IRedisSet + { + private readonly RedisTypedClient client; + private readonly string setId; + private const int PageLimit = 1000; + + public RedisClientSet(RedisTypedClient client, string setId) + { + this.client = client; + this.setId = setId; + } + + public string Id + { + get { return this.setId; } + } + + public IEnumerator GetEnumerator() + { + return this.Count <= PageLimit + ? 
client.GetAllItemsFromSet(this).GetEnumerator() + : GetPagingEnumerator(); + } + + public IEnumerator GetPagingEnumerator() + { + var skip = 0; + List pageResults; + do + { + pageResults = client.GetSortedEntryValues(this, skip, skip + PageLimit - 1); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public void Add(T item) + { + client.AddItemToSet(this, item); + } + + public void Clear() + { + client.RemoveEntry(setId); + } + + public bool Contains(T item) + { + return client.SetContainsItem(this, item); + } + + public void CopyTo(T[] array, int arrayIndex) + { + var allItemsInSet = client.GetAllItemsFromSet(this); + allItemsInSet.CopyTo(array, arrayIndex); + } + + public bool Remove(T item) + { + client.RemoveItemFromSet(this, item); + return true; + } + + public int Count + { + get + { + var setCount = (int)client.GetSetCount(this); + return setCount; + } + } + + public bool IsReadOnly { get { return false; } } + + public List Sort(int startingFrom, int endingAt) + { + return client.GetSortedEntryValues(this, startingFrom, endingAt); + } + + public HashSet GetAll() + { + return client.GetAllItemsFromSet(this); + } + + public T PopRandomItem() + { + return client.PopItemFromSet(this); + } + + public T GetRandomItem() + { + return client.GetRandomItemFromSet(this); + } + + public void MoveTo(T item, IRedisSet toSet) + { + client.MoveBetweenSets(this, toSet, item); + } + + public void PopulateWithIntersectOf(params IRedisSet[] sets) + { + client.StoreIntersectFromSets(this, sets); + } + + public void PopulateWithUnionOf(params IRedisSet[] sets) + { + client.StoreUnionFromSets(this, sets); + } + + public void GetDifferences(params IRedisSet[] withSets) + { + client.StoreUnionFromSets(this, withSets); + } + + public void PopulateWithDifferencesOf(IRedisSet fromSet, params IRedisSet[] withSets) + { + client.StoreDifferencesFromSet(this, fromSet, withSets); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.Async.cs b/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.Async.cs new file mode 100644 index 00000000..fae91472 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.Async.cs @@ -0,0 +1,136 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
+// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + internal partial class RedisClientSortedSet + : IRedisSortedSetAsync + { + IRedisTypedClientAsync AsyncClient => client; + + IRedisSortedSetAsync AsAsync() => this; + + ValueTask IRedisSortedSetAsync.AddAsync(T item, double score, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(this, item, score, token); + + ValueTask IRedisSortedSetAsync.CountAsync(CancellationToken token) + => AsyncClient.GetSortedSetCountAsync(this, token).AsInt32(); + + ValueTask> IRedisSortedSetAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetAsync(this, token); + + ValueTask> IRedisSortedSetAsync.GetAllDescendingAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetDescAsync(this, token); + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + var count = await AsAsync().CountAsync(token).ConfigureAwait(false); + if (count <= PageLimit) + { + var all = await AsyncClient.GetAllItemsFromSortedSetAsync(this, token).ConfigureAwait(false); + foreach (var item in all) + { + yield return item; + } + } + else + { + // from GetPagingEnumerator(); + var skip = 0; + List pageResults; + do + { + pageResults = await AsyncClient.GetRangeFromSortedSetAsync(this, skip, skip + PageLimit - 1, token).ConfigureAwait(false); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + } + + ValueTask IRedisSortedSetAsync.GetItemScoreAsync(T item, CancellationToken token) + => AsyncClient.GetItemScoreInSortedSetAsync(this, item, token); + + ValueTask> IRedisSortedSetAsync.GetRangeAsync(int fromRank, int toRank, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetAsync(this, fromRank, toRank, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByHighestScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(this, fromScore, toScore, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByHighestScoreAsync(double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(this, fromScore, toScore, skip, take, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByLowestScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(this, fromScore, toScore, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByLowestScoreAsync(double fromScore, double toScore, int? skip, int? 
take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(this, fromScore, toScore, skip, take, token); + + ValueTask IRedisSortedSetAsync.IncrementItemAsync(T item, double incrementBy, CancellationToken token) + => AsyncClient.IncrementItemInSortedSetAsync(this, item, incrementBy, token); + + ValueTask IRedisSortedSetAsync.IndexOfAsync(T item, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetAsync(this, item, token).AsInt32(); + + ValueTask IRedisSortedSetAsync.IndexOfDescendingAsync(T item, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetDescAsync(this, item, token); + + ValueTask IRedisSortedSetAsync.PopItemWithHighestScoreAsync(CancellationToken token) + => AsyncClient.PopItemWithHighestScoreFromSortedSetAsync(this, token); + + ValueTask IRedisSortedSetAsync.PopItemWithLowestScoreAsync(CancellationToken token) + => AsyncClient.PopItemWithLowestScoreFromSortedSetAsync(this, token); + + ValueTask IRedisSortedSetAsync.PopulateWithIntersectOfAsync(IRedisSortedSetAsync[] setIds, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(this, setIds, token); + + ValueTask IRedisSortedSetAsync.PopulateWithIntersectOfAsync(IRedisSortedSetAsync[] setIds, string[] args, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(this, setIds, args, token); + + ValueTask IRedisSortedSetAsync.PopulateWithUnionOfAsync(IRedisSortedSetAsync[] setIds, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(this, setIds, token); + + ValueTask IRedisSortedSetAsync.PopulateWithUnionOfAsync(IRedisSortedSetAsync[] setIds, string[] args, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(this, setIds, args, token); + + ValueTask IRedisSortedSetAsync.RemoveRangeAsync(int minRank, int maxRank, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetAsync(this, minRank, maxRank, token); + + ValueTask IRedisSortedSetAsync.RemoveRangeByScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetByScoreAsync(this, fromScore, toScore, token); + + ValueTask IRedisSortedSetAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveEntryAsync(setId, token).Await(); + + ValueTask IRedisSortedSetAsync.ContainsAsync(T value, CancellationToken token) + => AsyncClient.SortedSetContainsItemAsync(this, value, token); + + ValueTask IRedisSortedSetAsync.AddAsync(T value, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(this, value, token); + + ValueTask IRedisSortedSetAsync.RemoveAsync(T value, CancellationToken token) + => AsyncClient.RemoveItemFromSortedSetAsync(this, value, token).AwaitAsTrue(); // see Remove for why "true" + + ValueTask IRedisSortedSetAsync.PopulateWithIntersectOfAsync(params IRedisSortedSetAsync[] setIds) + => AsAsync().PopulateWithIntersectOfAsync(setIds, token: default); + + ValueTask IRedisSortedSetAsync.PopulateWithUnionOfAsync(params IRedisSortedSetAsync[] setIds) + => AsAsync().PopulateWithUnionOfAsync(setIds, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs b/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs index 14733858..5e7fdf13 100644 --- a/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs +++ b/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// 
https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -16,180 +16,190 @@ namespace ServiceStack.Redis.Generic { - /// - /// Wrap the common redis set operations under a ICollection[string] interface. - /// - internal class RedisClientSortedSet - : IRedisSortedSet - { - private readonly RedisTypedClient client; - private readonly string setId; - private const int PageLimit = 1000; - - public RedisClientSortedSet(RedisTypedClient client, string setId) - { - this.client = client; - this.setId = setId; - } - - public string Id - { - get { return this.setId; } - } - - public IEnumerator GetEnumerator() - { - return this.Count <= PageLimit - ? client.GetAllItemsFromSortedSet(this).GetEnumerator() - : GetPagingEnumerator(); - } - - public IEnumerator GetPagingEnumerator() - { - var skip = 0; - List pageResults; - do - { - pageResults = client.GetRangeFromSortedSet(this, skip, skip + PageLimit - 1); - foreach (var result in pageResults) - { - yield return result; - } - skip += PageLimit; - } while (pageResults.Count == PageLimit); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(T item) - { - client.AddItemToSortedSet(this, item); - } - - public void Add(T item, double score) - { - client.AddItemToSortedSet(this, item, score); - } - - public void Clear() - { - client.RemoveEntry(setId); - } - - public bool Contains(T item) - { - return client.SortedSetContainsItem(this, item); - } - - public void CopyTo(T[] array, int arrayIndex) - { - var allItemsInSet = client.GetAllItemsFromSortedSet(this); - allItemsInSet.CopyTo(array, arrayIndex); - } - - public bool Remove(T item) - { - client.RemoveItemFromSortedSet(this, item); - return true; - } - - public int Count - { - get - { - var setCount = client.GetSortedSetCount(this); - return setCount; - } - } - - public bool IsReadOnly { get { return false; } } - - public T PopItemWithHighestScore() - { - return client.PopItemWithHighestScoreFromSortedSet(this); - } - - public T PopItemWithLowestScore() - { - return client.PopItemWithLowestScoreFromSortedSet(this); - } - - public double IncrementItem(T item, double incrementBy) - { - return client.IncrementItemInSortedSet(this, item, incrementBy); - } - - public int IndexOf(T item) - { - return client.GetItemIndexInSortedSet(this, item); - } - - public int IndexOfDescending(T item) - { - return client.GetItemIndexInSortedSetDesc(this, item); - } - - public List GetAll() - { - return client.GetAllItemsFromSortedSet(this); - } - - public List GetAllDescending() - { - return client.GetAllItemsFromSortedSetDesc(this); - } - - public List GetRange(int fromRank, int toRank) - { - return client.GetRangeFromSortedSet(this, fromRank, toRank); - } - - public List GetRangeByLowestScore(double fromScore, double toScore) - { - return client.GetRangeFromSortedSetByLowestScore(this, fromScore, toScore); - } - - public List GetRangeByLowestScore(double fromScore, double toScore, int? skip, int? 
take) - { - return client.GetRangeFromSortedSetByLowestScore(this, fromScore, toScore, skip, take); - } - - public List GetRangeByHighestScore(double fromScore, double toScore) - { - return client.GetRangeFromSortedSetByHighestScore(this, fromScore, toScore); - } - - public List GetRangeByHighestScore(double fromScore, double toScore, int? skip, int? take) - { - return client.GetRangeFromSortedSetByHighestScore(this, fromScore, toScore, skip, take); - } - - public int RemoveRange(int minRank, int maxRank) - { - return client.RemoveRangeFromSortedSet(this, minRank, maxRank); - } - - public int RemoveRangeByScore(double fromScore, double toScore) - { - return client.RemoveRangeFromSortedSetByScore(this, fromScore, toScore); - } - - public double GetItemScore(T item) - { - return client.GetItemScoreInSortedSet(this, item); - } - - public int PopulateWithIntersectOf(params IRedisSortedSet[] setIds) - { - return client.StoreIntersectFromSortedSets(this, setIds); - } - - public int PopulateWithUnionOf(params IRedisSortedSet[] setIds) - { - return client.StoreUnionFromSortedSets(this, setIds); - } - } + /// + /// Wrap the common redis set operations under a ICollection[string] interface. + /// + internal partial class RedisClientSortedSet + : IRedisSortedSet + { + private readonly RedisTypedClient client; + private readonly string setId; + private const int PageLimit = 1000; + + public RedisClientSortedSet(RedisTypedClient client, string setId) + { + this.client = client; + this.setId = setId; + } + + public string Id + { + get { return this.setId; } + } + + public IEnumerator GetEnumerator() + { + return this.Count <= PageLimit + ? client.GetAllItemsFromSortedSet(this).GetEnumerator() + : GetPagingEnumerator(); + } + + public IEnumerator GetPagingEnumerator() + { + var skip = 0; + List pageResults; + do + { + pageResults = client.GetRangeFromSortedSet(this, skip, skip + PageLimit - 1); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public void Add(T item) + { + client.AddItemToSortedSet(this, item); + } + + public void Add(T item, double score) + { + client.AddItemToSortedSet(this, item, score); + } + + public void Clear() + { + client.RemoveEntry(setId); + } + + public bool Contains(T item) + { + return client.SortedSetContainsItem(this, item); + } + + public void CopyTo(T[] array, int arrayIndex) + { + var allItemsInSet = client.GetAllItemsFromSortedSet(this); + allItemsInSet.CopyTo(array, arrayIndex); + } + + public bool Remove(T item) + { + client.RemoveItemFromSortedSet(this, item); + return true; + } + + public int Count + { + get + { + var setCount = (int)client.GetSortedSetCount(this); + return setCount; + } + } + + public bool IsReadOnly { get { return false; } } + + public T PopItemWithHighestScore() + { + return client.PopItemWithHighestScoreFromSortedSet(this); + } + + public T PopItemWithLowestScore() + { + return client.PopItemWithLowestScoreFromSortedSet(this); + } + + public double IncrementItem(T item, double incrementBy) + { + return client.IncrementItemInSortedSet(this, item, incrementBy); + } + + public int IndexOf(T item) + { + return (int)client.GetItemIndexInSortedSet(this, item); + } + + public long IndexOfDescending(T item) + { + return client.GetItemIndexInSortedSetDesc(this, item); + } + + public List GetAll() + { + return client.GetAllItemsFromSortedSet(this); + } + + public List GetAllDescending() 
+ { + return client.GetAllItemsFromSortedSetDesc(this); + } + + public List GetRange(int fromRank, int toRank) + { + return client.GetRangeFromSortedSet(this, fromRank, toRank); + } + + public List GetRangeByLowestScore(double fromScore, double toScore) + { + return client.GetRangeFromSortedSetByLowestScore(this, fromScore, toScore); + } + + public List GetRangeByLowestScore(double fromScore, double toScore, int? skip, int? take) + { + return client.GetRangeFromSortedSetByLowestScore(this, fromScore, toScore, skip, take); + } + + public List GetRangeByHighestScore(double fromScore, double toScore) + { + return client.GetRangeFromSortedSetByHighestScore(this, fromScore, toScore); + } + + public List GetRangeByHighestScore(double fromScore, double toScore, int? skip, int? take) + { + return client.GetRangeFromSortedSetByHighestScore(this, fromScore, toScore, skip, take); + } + + public long RemoveRange(int minRank, int maxRank) + { + return client.RemoveRangeFromSortedSet(this, minRank, maxRank); + } + + public long RemoveRangeByScore(double fromScore, double toScore) + { + return client.RemoveRangeFromSortedSetByScore(this, fromScore, toScore); + } + + public double GetItemScore(T item) + { + return client.GetItemScoreInSortedSet(this, item); + } + + public long PopulateWithIntersectOf(params IRedisSortedSet[] setIds) + { + return client.StoreIntersectFromSortedSets(this, setIds); + } + + public long PopulateWithIntersectOf(IRedisSortedSet[] setIds, string[] args) + { + return client.StoreIntersectFromSortedSets(this, setIds, args); + } + + public long PopulateWithUnionOf(params IRedisSortedSet[] setIds) + { + return client.StoreUnionFromSortedSets(this, setIds); + } + + public long PopulateWithUnionOf(IRedisSortedSet[] setIds, string[] args) + { + return client.StoreUnionFromSortedSets(this, setIds, args); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient.Async.cs new file mode 100644 index 00000000..01018721 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient.Async.cs @@ -0,0 +1,765 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
+// + +using ServiceStack.Data; +using ServiceStack.Model; +using ServiceStack.Redis.Internal; +using ServiceStack.Text; +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + partial class RedisTypedClient + : IRedisTypedClientAsync + { + public IRedisTypedClientAsync AsAsync() => this; + + private IRedisClientAsync AsyncClient => client; + private IRedisNativeClientAsync AsyncNative => client; + + IRedisSetAsync IRedisTypedClientAsync.TypeIdsSet => TypeIdsSetRaw; + + IRedisClientAsync IRedisTypedClientAsync.RedisClient => client; + + internal ValueTask ExpectQueuedAsync(CancellationToken token) + => client.ExpectQueuedAsync(token); + + internal ValueTask ExpectOkAsync(CancellationToken token) + => client.ExpectOkAsync(token); + + internal ValueTask ReadMultiDataResultCountAsync(CancellationToken token) + => client.ReadMultiDataResultCountAsync(token); + + ValueTask IRedisTypedClientAsync.GetValueAsync(string key, CancellationToken token) + => DeserializeValueAsync(AsyncNative.GetAsync(key, token)); + + async ValueTask IRedisTypedClientAsync.SetValueAsync(string key, T entity, CancellationToken token) + { + AssertNotNull(key); + await AsyncClient.SetAsync(key, SerializeValue(entity), token).ConfigureAwait(false); + await client.RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + } + + Task IEntityStoreAsync.GetByIdAsync(object id, CancellationToken token) + { + var key = client.UrnKey(id); + return AsAsync().GetValueAsync(key, token).AsTask(); + } + + internal ValueTask FlushSendBufferAsync(CancellationToken token) + => client.FlushSendBufferAsync(token); + + internal ValueTask AddTypeIdsRegisteredDuringPipelineAsync(CancellationToken token) + => client.AddTypeIdsRegisteredDuringPipelineAsync(token); + + async Task> IEntityStoreAsync.GetByIdsAsync(IEnumerable ids, CancellationToken token) + { + if (ids != null) + { + var urnKeys = ids.Map(x => client.UrnKey(x)); + if (urnKeys.Count != 0) + return await AsAsync().GetValuesAsync(urnKeys, token).ConfigureAwait(false); + } + + return new List(); + } + + async Task> IEntityStoreAsync.GetAllAsync(CancellationToken token) + { + var allKeys = await AsyncClient.GetAllItemsFromSetAsync(this.TypeIdsSetKey, token).ConfigureAwait(false); + return await AsAsync().GetByIdsAsync(allKeys.ToArray(), token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.StoreAsync(T entity, CancellationToken token) + { + var urnKey = client.UrnKey(entity); + await AsAsync().SetValueAsync(urnKey, entity, token).ConfigureAwait(false); + return entity; + } + + async Task IEntityStoreAsync.StoreAllAsync(IEnumerable entities, CancellationToken token) + { + if (PrepareStoreAll(entities, out var keys, out var values, out var entitiesList)) + { + await AsyncNative.MSetAsync(keys, values, token).ConfigureAwait(false); + await client.RegisterTypeIdsAsync(entitiesList, token).ConfigureAwait(false); + } + } + + async Task IEntityStoreAsync.DeleteAsync(T entity, CancellationToken token) + { + var urnKey = client.UrnKey(entity); + await AsyncClient.RemoveEntryAsync(new[] { urnKey }, token).ConfigureAwait(false); + await client.RemoveTypeIdsByValueAsync(entity, token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteByIdAsync(object id, CancellationToken token) + { + var urnKey = client.UrnKey(id); + + await AsyncClient.RemoveEntryAsync(new[] { urnKey 
}, token).ConfigureAwait(false); + await client.RemoveTypeIdsByIdAsync(id.ToString(), token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteByIdsAsync(IEnumerable ids, CancellationToken token) + { + if (ids == null) return; + + var idStrings = ids.Cast().Select(x => x.ToString()).ToArray(); + var urnKeys = idStrings.Select(t => client.UrnKey(t)).ToArray(); + if (urnKeys.Length > 0) + { + await AsyncClient.RemoveEntryAsync(urnKeys, token).ConfigureAwait(false); + await client.RemoveTypeIdsByIdsAsync(idStrings, token).ConfigureAwait(false); + } + } + + async Task IEntityStoreAsync.DeleteAllAsync(CancellationToken token) + { + await DeleteAllAsync(0,RedisConfig.CommandKeysBatchSize, token).ConfigureAwait(false); + } + + private async Task DeleteAllAsync(ulong cursor, int batchSize, CancellationToken token) + { + do + { + var scanResult = await AsyncNative.SScanAsync(this.TypeIdsSetKey, cursor, batchSize, token: token).ConfigureAwait(false); + cursor = scanResult.Cursor; + var urnKeys = scanResult.Results.Select(x => client.UrnKey(Encoding.UTF8.GetString(x))).ToArray(); + if (urnKeys.Length > 0) + { + await AsyncClient.RemoveEntryAsync(urnKeys, token).ConfigureAwait(false); + } + } while (cursor != 0); + await AsyncClient.RemoveEntryAsync(new[] { this.TypeIdsSetKey }, token).ConfigureAwait(false); + } + + async ValueTask> IRedisTypedClientAsync.GetValuesAsync(List keys, CancellationToken token) + { + if (keys.IsNullOrEmpty()) return new List(); + + var resultBytesArray = await AsyncNative.MGetAsync(keys.ToArray(), token).ConfigureAwait(false); + return ProcessGetValues(resultBytesArray); + } + + ValueTask> IRedisTypedClientAsync.CreateTransactionAsync(CancellationToken token) + { + IRedisTypedTransactionAsync obj = new RedisTypedTransaction(this, true); + return obj.AsValueTaskResult(); + } + + IRedisTypedPipelineAsync IRedisTypedClientAsync.CreatePipeline() + => new RedisTypedPipeline(this); + + + ValueTask IRedisTypedClientAsync.AcquireLockAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.AcquireLockAsync(this.TypeLockKey, timeOut, token); + + long IRedisTypedClientAsync.Db => AsyncClient.Db; + + IHasNamed> IRedisTypedClientAsync.Lists => Lists as IHasNamed> ?? throw new NotSupportedException("The provided Lists does not support IRedisListAsync"); + IHasNamed> IRedisTypedClientAsync.Sets => Sets as IHasNamed> ?? throw new NotSupportedException("The provided Sets does not support IRedisSetAsync"); + IHasNamed> IRedisTypedClientAsync.SortedSets => SortedSets as IHasNamed> ?? throw new NotSupportedException("The provided SortedSets does not support IRedisSortedSetAsync"); + + IRedisHashAsync IRedisTypedClientAsync.GetHash(string hashId) => GetHash(hashId) as IRedisHashAsync ?? 
throw new NotSupportedException("The provided Hash does not support IRedisHashAsync"); + + ValueTask IRedisTypedClientAsync.SelectAsync(long db, CancellationToken token) + => AsyncClient.SelectAsync(db, token); + + ValueTask> IRedisTypedClientAsync.GetAllKeysAsync(CancellationToken token) + => AsyncClient.GetAllKeysAsync(token); + + ValueTask IRedisTypedClientAsync.SetSequenceAsync(int value, CancellationToken token) + => AsyncNative.GetSetAsync(SequenceKey, Encoding.UTF8.GetBytes(value.ToString()), token).Await(); + + ValueTask IRedisTypedClientAsync.GetNextSequenceAsync(CancellationToken token) + => AsAsync().IncrementValueAsync(SequenceKey, token); + + ValueTask IRedisTypedClientAsync.GetNextSequenceAsync(int incrBy, CancellationToken token) + => AsAsync().IncrementValueByAsync(SequenceKey, incrBy, token); + + ValueTask IRedisTypedClientAsync.GetEntryTypeAsync(string key, CancellationToken token) + => AsyncClient.GetEntryTypeAsync(key, token); + + ValueTask IRedisTypedClientAsync.GetRandomKeyAsync(CancellationToken token) + => AsyncClient.GetRandomKeyAsync(token); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void AssertNotNull(object obj, string name = "key") + { + if (obj is null) Throw(name); + static void Throw(string name) => throw new ArgumentNullException(name); + } + + async ValueTask IRedisTypedClientAsync.SetValueAsync(string key, T entity, TimeSpan expireIn, CancellationToken token) + { + AssertNotNull(key); + await AsyncClient.SetAsync(key, SerializeValue(entity), expireIn, token).ConfigureAwait(false); + await client.RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + } + + async ValueTask IRedisTypedClientAsync.SetValueIfNotExistsAsync(string key, T entity, CancellationToken token) + { + var success = await AsyncNative.SetNXAsync(key, SerializeValue(entity), token).IsSuccessAsync().ConfigureAwait(false); + if (success) await client.RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + return success; + } + + async ValueTask IRedisTypedClientAsync.SetValueIfExistsAsync(string key, T entity, CancellationToken token) + { + var success = await AsyncNative.SetAsync(key, SerializeValue(entity), exists: true, token: token).ConfigureAwait(false); + if (success) await client.RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + return success; + } + + async ValueTask IRedisTypedClientAsync.StoreAsync(T entity, TimeSpan expireIn, CancellationToken token) + { + var urnKey = client.UrnKey(entity); + await AsAsync().SetValueAsync(urnKey, entity, expireIn, token).ConfigureAwait(false); + return entity; + } + + ValueTask IRedisTypedClientAsync.GetAndSetValueAsync(string key, T value, CancellationToken token) + => DeserializeValueAsync(AsyncNative.GetSetAsync(key, SerializeValue(value), token)); + + ValueTask IRedisTypedClientAsync.ContainsKeyAsync(string key, CancellationToken token) + => AsyncNative.ExistsAsync(key, token).IsSuccessAsync(); + + ValueTask IRedisTypedClientAsync.RemoveEntryAsync(string key, CancellationToken token) + => AsyncNative.DelAsync(key, token).IsSuccessAsync(); + + ValueTask IRedisTypedClientAsync.RemoveEntryAsync(string[] keys, CancellationToken token) + => AsyncNative.DelAsync(keys, token).IsSuccessAsync(); + + async ValueTask IRedisTypedClientAsync.RemoveEntryAsync(IHasStringId[] entities, CancellationToken token) + { + var ids = entities.Select(x => x.Id).ToArray(); + var success = await AsyncNative.DelAsync(ids, token).IsSuccessAsync().ConfigureAwait(false); + if (success) await client.RemoveTypeIdsByValuesAsync(ids, 
token).ConfigureAwait(false); + return success; + } + + ValueTask IRedisTypedClientAsync.IncrementValueAsync(string key, CancellationToken token) + => AsyncNative.IncrAsync(key, token); + + ValueTask IRedisTypedClientAsync.IncrementValueByAsync(string key, int count, CancellationToken token) + => AsyncNative.IncrByAsync(key, count, token); + + ValueTask IRedisTypedClientAsync.DecrementValueAsync(string key, CancellationToken token) + => AsyncNative.DecrAsync(key, token); + + ValueTask IRedisTypedClientAsync.DecrementValueByAsync(string key, int count, CancellationToken token) + => AsyncNative.DecrByAsync(key, count, token); + + ValueTask IRedisTypedClientAsync.ExpireInAsync(object id, TimeSpan expiresIn, CancellationToken token) + { + var key = client.UrnKey(id); + return AsyncClient.ExpireEntryInAsync(key, expiresIn, token); + } + + ValueTask IRedisTypedClientAsync.ExpireAtAsync(object id, DateTime expireAt, CancellationToken token) + { + var key = client.UrnKey(id); + return AsyncClient.ExpireEntryAtAsync(key, expireAt, token); + } + + ValueTask IRedisTypedClientAsync.ExpireEntryInAsync(string key, TimeSpan expireIn, CancellationToken token) + => AsyncClient.ExpireEntryInAsync(key, expireIn, token); + + ValueTask IRedisTypedClientAsync.ExpireEntryAtAsync(string key, DateTime expireAt, CancellationToken token) + => AsyncClient.ExpireEntryAtAsync(key, expireAt, token); + + async ValueTask IRedisTypedClientAsync.GetTimeToLiveAsync(string key, CancellationToken token) + => TimeSpan.FromSeconds(await AsyncNative.TtlAsync(key, token).ConfigureAwait(false)); + + ValueTask IRedisTypedClientAsync.ForegroundSaveAsync(CancellationToken token) + => AsyncClient.ForegroundSaveAsync(token); + + ValueTask IRedisTypedClientAsync.BackgroundSaveAsync(CancellationToken token) + => AsyncClient.BackgroundSaveAsync(token); + + ValueTask IRedisTypedClientAsync.FlushDbAsync(CancellationToken token) + => AsyncClient.FlushDbAsync(token); + + ValueTask IRedisTypedClientAsync.FlushAllAsync(CancellationToken token) + => new ValueTask(AsyncClient.FlushAllAsync(token)); + + async ValueTask IRedisTypedClientAsync.SearchKeysAsync(string pattern, CancellationToken token) + { + var strKeys = await AsyncClient.SearchKeysAsync(pattern, token).ConfigureAwait(false); + return SearchKeysParse(strKeys); + } + + private ValueTask> CreateList(ValueTask pending) + { + return pending.IsCompletedSuccessfully ? CreateList(pending.Result).AsValueTaskResult() : Awaited(this, pending); + static async ValueTask> Awaited(RedisTypedClient obj, ValueTask pending) + => obj.CreateList(await pending.ConfigureAwait(false)); + } + private ValueTask DeserializeValueAsync(ValueTask pending) + { + return pending.IsCompletedSuccessfully ? DeserializeValue(pending.Result).AsValueTaskResult() : Awaited(this, pending); + static async ValueTask Awaited(RedisTypedClient obj, ValueTask pending) + => obj.DeserializeValue(await pending.ConfigureAwait(false)); + } + + private static ValueTask DeserializeFromStringAsync(ValueTask pending) + { + return pending.IsCompletedSuccessfully ? DeserializeFromString(pending.Result).AsValueTaskResult() : Awaited(pending); + static async ValueTask Awaited(ValueTask pending) + => DeserializeFromString(await pending.ConfigureAwait(false)); + } + + private static ValueTask> CreateGenericMapAsync(ValueTask> pending) + { + return pending.IsCompletedSuccessfully ? 
CreateGenericMap(pending.Result).AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask> pending) + => CreateGenericMap(await pending.ConfigureAwait(false)); + } + + private static ValueTask> ConvertEachToAsync(ValueTask> pending) + { + return pending.IsCompletedSuccessfully ? ConvertEachTo(pending.Result).AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask> pending) + => ConvertEachTo(await pending.ConfigureAwait(false)); + } + + ValueTask> IRedisTypedClientAsync.GetSortedEntryValuesAsync(IRedisSetAsync fromSet, int startingFrom, int endingAt, CancellationToken token) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; + var multiDataList = AsyncNative.SortAsync(fromSet.Id, sortOptions, token); + return CreateList(multiDataList); + } + + ValueTask IRedisTypedClientAsync.StoreAsHashAsync(T entity, CancellationToken token) + => AsyncClient.StoreAsHashAsync(entity, token); + + ValueTask IRedisTypedClientAsync.GetFromHashAsync(object id, CancellationToken token) + => AsyncClient.GetFromHashAsync(id, token); + + async ValueTask> IRedisTypedClientAsync.GetAllItemsFromSetAsync(IRedisSetAsync fromSet, CancellationToken token) + { + var multiDataList = await AsyncNative.SMembersAsync(fromSet.Id, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisTypedClientAsync.AddItemToSetAsync(IRedisSetAsync toSet, T item, CancellationToken token) + => AsyncNative.SAddAsync(toSet.Id, SerializeValue(item), token).Await(); + + ValueTask IRedisTypedClientAsync.RemoveItemFromSetAsync(IRedisSetAsync fromSet, T item, CancellationToken token) + => AsyncNative.SRemAsync(fromSet.Id, SerializeValue(item), token).Await(); + + ValueTask IRedisTypedClientAsync.PopItemFromSetAsync(IRedisSetAsync fromSet, CancellationToken token) + => DeserializeValueAsync(AsyncNative.SPopAsync(fromSet.Id, token)); + + ValueTask IRedisTypedClientAsync.MoveBetweenSetsAsync(IRedisSetAsync fromSet, IRedisSetAsync toSet, T item, CancellationToken token) + => AsyncNative.SMoveAsync(fromSet.Id, toSet.Id, SerializeValue(item), token); + + ValueTask IRedisTypedClientAsync.GetSetCountAsync(IRedisSetAsync set, CancellationToken token) + => AsyncNative.SCardAsync(set.Id, token); + + ValueTask IRedisTypedClientAsync.SetContainsItemAsync(IRedisSetAsync set, T item, CancellationToken token) + => AsyncNative.SIsMemberAsync(set.Id, SerializeValue(item), token).IsSuccessAsync(); + + async ValueTask> IRedisTypedClientAsync.GetIntersectFromSetsAsync(IRedisSetAsync[] sets, CancellationToken token) + { + var multiDataList = await AsyncNative.SInterAsync(sets.Map(x => x.Id).ToArray(), token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSetsAsync(IRedisSetAsync intoSet, IRedisSetAsync[] sets, CancellationToken token) + => AsyncNative.SInterStoreAsync(intoSet.Id, sets.Map(x => x.Id).ToArray(), token); + + async ValueTask> IRedisTypedClientAsync.GetUnionFromSetsAsync(IRedisSetAsync[] sets, CancellationToken token) + { + var multiDataList = await AsyncNative.SUnionAsync(sets.Map(x => x.Id).ToArray(), token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisTypedClientAsync.StoreUnionFromSetsAsync(IRedisSetAsync intoSet, IRedisSetAsync[] sets, CancellationToken token) + => AsyncNative.SUnionStoreAsync(intoSet.Id, sets.Map(x => x.Id).ToArray(), token); + + async ValueTask> 
IRedisTypedClientAsync.GetDifferencesFromSetAsync(IRedisSetAsync fromSet, IRedisSetAsync[] withSets, CancellationToken token) + { + var multiDataList = await AsyncNative.SDiffAsync(fromSet.Id, withSets.Map(x => x.Id).ToArray(), token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisTypedClientAsync.StoreDifferencesFromSetAsync(IRedisSetAsync intoSet, IRedisSetAsync fromSet, IRedisSetAsync[] withSets, CancellationToken token) + => AsyncNative.SDiffStoreAsync(intoSet.Id, fromSet.Id, withSets.Map(x => x.Id).ToArray(), token); + + ValueTask IRedisTypedClientAsync.GetRandomItemFromSetAsync(IRedisSetAsync fromSet, CancellationToken token) + => DeserializeValueAsync(AsyncNative.SRandMemberAsync(fromSet.Id, token)); + + ValueTask> IRedisTypedClientAsync.GetAllItemsFromListAsync(IRedisListAsync fromList, CancellationToken token) + { + var multiDataList = AsyncNative.LRangeAsync(fromList.Id, FirstElement, LastElement, token); + return CreateList(multiDataList); + } + + ValueTask> IRedisTypedClientAsync.GetRangeFromListAsync(IRedisListAsync fromList, int startingFrom, int endingAt, CancellationToken token) + { + var multiDataList = AsyncNative.LRangeAsync(fromList.Id, startingFrom, endingAt, token); + return CreateList(multiDataList); + } + + ValueTask> IRedisTypedClientAsync.SortListAsync(IRedisListAsync fromList, int startingFrom, int endingAt, CancellationToken token) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; + var multiDataList = AsyncNative.SortAsync(fromList.Id, sortOptions, token); + return CreateList(multiDataList); + } + + ValueTask IRedisTypedClientAsync.AddItemToListAsync(IRedisListAsync fromList, T value, CancellationToken token) + => AsyncNative.RPushAsync(fromList.Id, SerializeValue(value), token).Await(); + + ValueTask IRedisTypedClientAsync.PrependItemToListAsync(IRedisListAsync fromList, T value, CancellationToken token) + => AsyncNative.LPushAsync(fromList.Id, SerializeValue(value), token).Await(); + + ValueTask IRedisTypedClientAsync.RemoveStartFromListAsync(IRedisListAsync fromList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.LPopAsync(fromList.Id, token)); + + async ValueTask IRedisTypedClientAsync.BlockingRemoveStartFromListAsync(IRedisListAsync fromList, TimeSpan? timeOut, CancellationToken token) + { + var unblockingKeyAndValue = await AsyncNative.BLPopAsync(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + return unblockingKeyAndValue.Length == 0 + ? 
default + : DeserializeValue(unblockingKeyAndValue[1]); + } + + ValueTask IRedisTypedClientAsync.RemoveEndFromListAsync(IRedisListAsync fromList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.RPopAsync(fromList.Id, token)); + + ValueTask IRedisTypedClientAsync.RemoveAllFromListAsync(IRedisListAsync fromList, CancellationToken token) + => AsyncNative.LTrimAsync(fromList.Id, int.MaxValue, FirstElement, token); + + ValueTask IRedisTypedClientAsync.TrimListAsync(IRedisListAsync fromList, int keepStartingFrom, int keepEndingAt, CancellationToken token) + => AsyncNative.LTrimAsync(fromList.Id, keepStartingFrom, keepEndingAt, token); + + ValueTask IRedisTypedClientAsync.RemoveItemFromListAsync(IRedisListAsync fromList, T value, CancellationToken token) + { + const int removeAll = 0; + return AsyncNative.LRemAsync(fromList.Id, removeAll, SerializeValue(value), token); + } + + ValueTask IRedisTypedClientAsync.RemoveItemFromListAsync(IRedisListAsync fromList, T value, int noOfMatches, CancellationToken token) + => AsyncNative.LRemAsync(fromList.Id, noOfMatches, SerializeValue(value), token); + + ValueTask IRedisTypedClientAsync.GetListCountAsync(IRedisListAsync fromList, CancellationToken token) + => AsyncNative.LLenAsync(fromList.Id, token); + + ValueTask IRedisTypedClientAsync.GetItemFromListAsync(IRedisListAsync fromList, int listIndex, CancellationToken token) + => DeserializeValueAsync(AsyncNative.LIndexAsync(fromList.Id, listIndex, token)); + + ValueTask IRedisTypedClientAsync.SetItemInListAsync(IRedisListAsync toList, int listIndex, T value, CancellationToken token) + => AsyncNative.LSetAsync(toList.Id, listIndex, SerializeValue(value), token); + + ValueTask IRedisTypedClientAsync.InsertBeforeItemInListAsync(IRedisListAsync toList, T pivot, T value, CancellationToken token) + => AsyncNative.LInsertAsync(toList.Id, insertBefore: true, pivot: SerializeValue(pivot), value: SerializeValue(value), token: token); + + ValueTask IRedisTypedClientAsync.InsertAfterItemInListAsync(IRedisListAsync toList, T pivot, T value, CancellationToken token) + => AsyncNative.LInsertAsync(toList.Id, insertBefore: false, pivot: SerializeValue(pivot), value: SerializeValue(value), token: token); + + ValueTask IRedisTypedClientAsync.EnqueueItemOnListAsync(IRedisListAsync fromList, T item, CancellationToken token) + => AsyncNative.LPushAsync(fromList.Id, SerializeValue(item), token).Await(); + + ValueTask IRedisTypedClientAsync.DequeueItemFromListAsync(IRedisListAsync fromList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.RPopAsync(fromList.Id, token)); + + async ValueTask IRedisTypedClientAsync.BlockingDequeueItemFromListAsync(IRedisListAsync fromList, TimeSpan? timeOut, CancellationToken token) + { + var unblockingKeyAndValue = await AsyncNative.BRPopAsync(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + return unblockingKeyAndValue.Length == 0 + ? default + : DeserializeValue(unblockingKeyAndValue[1]); + } + + ValueTask IRedisTypedClientAsync.PushItemToListAsync(IRedisListAsync fromList, T item, CancellationToken token) + => AsyncNative.RPushAsync(fromList.Id, SerializeValue(item), token).Await(); + + ValueTask IRedisTypedClientAsync.PopItemFromListAsync(IRedisListAsync fromList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.RPopAsync(fromList.Id, token)); + + async ValueTask IRedisTypedClientAsync.BlockingPopItemFromListAsync(IRedisListAsync fromList, TimeSpan? 
timeOut, CancellationToken token) + { + var unblockingKeyAndValue = await AsyncNative.BRPopAsync(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + return unblockingKeyAndValue.Length == 0 + ? default + : DeserializeValue(unblockingKeyAndValue[1]); + } + + ValueTask IRedisTypedClientAsync.PopAndPushItemBetweenListsAsync(IRedisListAsync fromList, IRedisListAsync toList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.RPopLPushAsync(fromList.Id, toList.Id, token)); + + ValueTask IRedisTypedClientAsync.BlockingPopAndPushItemBetweenListsAsync(IRedisListAsync fromList, IRedisListAsync toList, TimeSpan? timeOut, CancellationToken token) + => DeserializeValueAsync(AsyncNative.BRPopLPushAsync(fromList.Id, toList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds, token)); + + ValueTask IRedisTypedClientAsync.AddItemToSortedSetAsync(IRedisSortedSetAsync toSet, T value, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(toSet.Id, value.SerializeToString(), token).Await(); + + ValueTask IRedisTypedClientAsync.AddItemToSortedSetAsync(IRedisSortedSetAsync toSet, T value, double score, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(toSet.Id, value.SerializeToString(), score, token).Await(); + + ValueTask IRedisTypedClientAsync.RemoveItemFromSortedSetAsync(IRedisSortedSetAsync fromSet, T value, CancellationToken token) + => AsyncClient.RemoveItemFromSortedSetAsync(fromSet.Id, value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.PopItemWithLowestScoreFromSortedSetAsync(IRedisSortedSetAsync fromSet, CancellationToken token) + => DeserializeFromStringAsync(AsyncClient.PopItemWithLowestScoreFromSortedSetAsync(fromSet.Id, token)); + + ValueTask IRedisTypedClientAsync.PopItemWithHighestScoreFromSortedSetAsync(IRedisSortedSetAsync fromSet, CancellationToken token) + => DeserializeFromStringAsync(AsyncClient.PopItemWithHighestScoreFromSortedSetAsync(fromSet.Id, token)); + + ValueTask IRedisTypedClientAsync.SortedSetContainsItemAsync(IRedisSortedSetAsync set, T value, CancellationToken token) + => AsyncClient.SortedSetContainsItemAsync(set.Id, value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.IncrementItemInSortedSetAsync(IRedisSortedSetAsync set, T value, double incrementBy, CancellationToken token) + => AsyncClient.IncrementItemInSortedSetAsync(set.Id, value.SerializeToString(), incrementBy, token); + + ValueTask IRedisTypedClientAsync.GetItemIndexInSortedSetAsync(IRedisSortedSetAsync set, T value, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetAsync(set.Id, value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.GetItemIndexInSortedSetDescAsync(IRedisSortedSetAsync set, T value, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetDescAsync(set.Id, value.SerializeToString(), token); + + ValueTask> IRedisTypedClientAsync.GetAllItemsFromSortedSetAsync(IRedisSortedSetAsync set, CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetAsync(set.Id, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetAllItemsFromSortedSetDescAsync(IRedisSortedSetAsync set, CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetDescAsync(set.Id, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetAsync(IRedisSortedSetAsync set, int fromRank, int toRank, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetAsync(set.Id, fromRank, toRank, token).ConvertEachToAsync(); + + 
ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetDescAsync(IRedisSortedSetAsync set, int fromRank, int toRank, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetDescAsync(set.Id, fromRank, toRank, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetAllWithScoresFromSortedSetAsync(IRedisSortedSetAsync set, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetAsync(set.Id, FirstElement, LastElement, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetAsync(IRedisSortedSetAsync set, int fromRank, int toRank, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetAsync(set.Id, fromRank, toRank, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetDescAsync(IRedisSortedSetAsync set, int fromRank, int toRank, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetDescAsync(set.Id, fromRank, toRank, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(set.Id, fromStringScore, toStringScore, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(set.Id, fromStringScore, toStringScore, skip, take, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(set.Id, fromScore, toScore, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(set.Id, fromScore, toScore, skip, take, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByLowestScoreAsync(set.Id, fromStringScore, toStringScore, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByLowestScoreAsync(set.Id, fromStringScore, toStringScore, skip, take, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByLowestScoreAsync(set.Id, fromScore, toScore, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, int? skip, int? 
take, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByLowestScoreAsync(set.Id, fromScore, toScore, skip, take, token)); + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(set.Id, fromStringScore, toStringScore, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(set.Id, fromStringScore, toStringScore, skip, take, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(set.Id, fromScore, toScore, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(set.Id, fromScore, toScore, skip, take, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByHighestScoreAsync(set.Id, fromStringScore, toStringScore, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByHighestScoreAsync(set.Id, fromStringScore, toStringScore, skip, take, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByHighestScoreAsync(set.Id, fromScore, toScore, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, int? skip, int? 
take, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByHighestScoreAsync(set.Id, fromScore, toScore, skip, take, token)); + + ValueTask IRedisTypedClientAsync.RemoveRangeFromSortedSetAsync(IRedisSortedSetAsync set, int minRank, int maxRank, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetAsync(set.Id, minRank, maxRank, token); + + ValueTask IRedisTypedClientAsync.RemoveRangeFromSortedSetByScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetByScoreAsync(set.Id, fromScore, toScore, token); + + ValueTask IRedisTypedClientAsync.GetSortedSetCountAsync(IRedisSortedSetAsync set, CancellationToken token) + => AsyncClient.GetSortedSetCountAsync(set.Id, token); + + ValueTask IRedisTypedClientAsync.GetItemScoreInSortedSetAsync(IRedisSortedSetAsync set, T value, CancellationToken token) + => AsyncClient.GetItemScoreInSortedSetAsync(set.Id, value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, IRedisSortedSetAsync[] setIds, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), token); + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, IRedisSortedSetAsync[] setIds, string[] args, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), args, token); + + ValueTask IRedisTypedClientAsync.StoreUnionFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, IRedisSortedSetAsync[] setIds, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), token); + + ValueTask IRedisTypedClientAsync.StoreUnionFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, IRedisSortedSetAsync[] setIds, string[] args, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), args, token); + + ValueTask IRedisTypedClientAsync.HashContainsEntryAsync(IRedisHashAsync hash, TKey key, CancellationToken token) + => AsyncClient.HashContainsEntryAsync(hash.Id, key.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.SetEntryInHashAsync(IRedisHashAsync hash, TKey key, T value, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(hash.Id, key.SerializeToString(), value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.SetEntryInHashIfNotExistsAsync(IRedisHashAsync hash, TKey key, T value, CancellationToken token) + => AsyncClient.SetEntryInHashIfNotExistsAsync(hash.Id, key.SerializeToString(), value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.SetRangeInHashAsync(IRedisHashAsync hash, IEnumerable> keyValuePairs, CancellationToken token) + { + var stringKeyValuePairs = keyValuePairs.ToList().ConvertAll( + x => new KeyValuePair(x.Key.SerializeToString(), x.Value.SerializeToString())); + + return AsyncClient.SetRangeInHashAsync(hash.Id, stringKeyValuePairs, token); + } + + ValueTask IRedisTypedClientAsync.GetValueFromHashAsync(IRedisHashAsync hash, TKey key, CancellationToken token) + => DeserializeFromStringAsync(AsyncClient.GetValueFromHashAsync(hash.Id, key.SerializeToString(), token)); + + ValueTask IRedisTypedClientAsync.RemoveEntryFromHashAsync(IRedisHashAsync hash, TKey key, CancellationToken token) + => AsyncClient.RemoveEntryFromHashAsync(hash.Id, 
key.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.GetHashCountAsync(IRedisHashAsync hash, CancellationToken token) + => AsyncClient.GetHashCountAsync(hash.Id, token); + + ValueTask> IRedisTypedClientAsync.GetHashKeysAsync(IRedisHashAsync hash, CancellationToken token) + => AsyncClient.GetHashKeysAsync(hash.Id, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetHashValuesAsync(IRedisHashAsync hash, CancellationToken token) + => AsyncClient.GetHashValuesAsync(hash.Id, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetAllEntriesFromHashAsync(IRedisHashAsync hash, CancellationToken token) + => ConvertEachToAsync(AsyncClient.GetAllEntriesFromHashAsync(hash.Id, token)); + + async ValueTask IRedisTypedClientAsync.StoreRelatedEntitiesAsync(object parentId, List children, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + var childKeys = children.ConvertAll(x => client.UrnKey(x)); + + await using var trans = await AsyncClient.CreateTransactionAsync(token).ConfigureAwait(false); + //Ugly but need access to a generic constraint-free StoreAll method + trans.QueueCommand(c => ((RedisClient)c).StoreAllAsyncImpl(children, token)); + trans.QueueCommand(c => c.AddRangeToSetAsync(childRefKey, childKeys, token)); + + await trans.CommitAsync(token).ConfigureAwait(false); + } + + ValueTask IRedisTypedClientAsync.StoreRelatedEntitiesAsync(object parentId, TChild[] children, CancellationToken token) + => AsAsync().StoreRelatedEntitiesAsync(parentId, new List(children), token); + + ValueTask IRedisTypedClientAsync.DeleteRelatedEntitiesAsync(object parentId, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + return new ValueTask(AsyncClient.RemoveAsync(childRefKey, token)); + } + + ValueTask IRedisTypedClientAsync.DeleteRelatedEntityAsync(object parentId, object childId, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + return AsyncClient.RemoveItemFromSetAsync(childRefKey, TypeSerializer.SerializeToString(childId), token); + } + + async ValueTask> IRedisTypedClientAsync.GetRelatedEntitiesAsync(object parentId, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + var childKeys = (await AsyncClient.GetAllItemsFromSetAsync(childRefKey, token).ConfigureAwait(false)).ToList(); + + return await AsyncClient.As().GetValuesAsync(childKeys, token).ConfigureAwait(false); + } + + ValueTask IRedisTypedClientAsync.GetRelatedEntitiesCountAsync(object parentId, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + return AsyncClient.GetSetCountAsync(childRefKey, token); + } + + ValueTask IRedisTypedClientAsync.AddToRecentsListAsync(T value, CancellationToken token) + { + var key = client.UrnKey(value); + var nowScore = DateTime.UtcNow.ToUnixTime(); + return AsyncClient.AddItemToSortedSetAsync(RecentSortedSetKey, key, nowScore, token).Await(); + } + + async ValueTask> IRedisTypedClientAsync.GetLatestFromRecentsListAsync(int skip, int take, CancellationToken token) + { + var toRank = take - 1; + var keys = await AsyncClient.GetRangeFromSortedSetDescAsync(RecentSortedSetKey, skip, toRank, token).ConfigureAwait(false); + var values = await AsAsync().GetValuesAsync(keys, token).ConfigureAwait(false); + return values; + } + + async ValueTask> IRedisTypedClientAsync.GetEarliestFromRecentsListAsync(int skip, int take, CancellationToken token) + { + var toRank = take - 1; + var keys = await 
AsyncClient.GetRangeFromSortedSetAsync(RecentSortedSetKey, skip, toRank, token).ConfigureAwait(false); + var values = await AsAsync().GetValuesAsync(keys, token).ConfigureAwait(false); + return values; + } + + ValueTask IRedisTypedClientAsync.RemoveEntryAsync(params string[] args) + => AsAsync().RemoveEntryAsync(args, token: default); + + ValueTask IRedisTypedClientAsync.RemoveEntryAsync(params IHasStringId[] entities) + => AsAsync().RemoveEntryAsync(entities, token: default); + + ValueTask> IRedisTypedClientAsync.GetIntersectFromSetsAsync(params IRedisSetAsync[] sets) + => AsAsync().GetIntersectFromSetsAsync(sets, token: default); + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSetsAsync(IRedisSetAsync intoSet, params IRedisSetAsync[] sets) + => AsAsync().StoreIntersectFromSetsAsync(intoSet, sets, token: default); + + ValueTask> IRedisTypedClientAsync.GetUnionFromSetsAsync(params IRedisSetAsync[] sets) + => AsAsync().GetUnionFromSetsAsync(sets, token: default); + + ValueTask IRedisTypedClientAsync.StoreUnionFromSetsAsync(IRedisSetAsync intoSet, params IRedisSetAsync[] sets) + => AsAsync().StoreUnionFromSetsAsync(intoSet, sets, token: default); + + ValueTask> IRedisTypedClientAsync.GetDifferencesFromSetAsync(IRedisSetAsync fromSet, params IRedisSetAsync[] withSets) + => AsAsync().GetDifferencesFromSetAsync(fromSet, withSets, token: default); + + ValueTask IRedisTypedClientAsync.StoreDifferencesFromSetAsync(IRedisSetAsync intoSet, IRedisSetAsync fromSet, params IRedisSetAsync[] withSets) + => AsAsync().StoreDifferencesFromSetAsync(intoSet, fromSet, withSets, token: default); + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, params IRedisSortedSetAsync[] setIds) + => AsAsync().StoreIntersectFromSortedSetsAsync(intoSetId, setIds, token: default); + + ValueTask IRedisTypedClientAsync.StoreUnionFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, params IRedisSortedSetAsync[] setIds) + => AsAsync().StoreUnionFromSortedSetsAsync(intoSetId, setIds, token: default); + + ValueTask IRedisTypedClientAsync.StoreRelatedEntitiesAsync(object parentId, params TChild[] children) + => AsAsync().StoreRelatedEntitiesAsync(parentId, children, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient.cs index 37b0f563..cf070975 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -15,96 +15,81 @@ using System.Collections.Generic; using System.Linq; using System.Text; -using ServiceStack.Common.Extensions; -using ServiceStack.Common.Utils; -using ServiceStack.DesignPatterns.Model; +using ServiceStack.Model; using ServiceStack.Redis.Pipeline; using ServiceStack.Text; namespace ServiceStack.Redis.Generic { - /// - /// Allows you to get Redis value operations to operate against POCO types. 
- /// - /// - public partial class RedisTypedClient - : IRedisTypedClient - { - readonly ITypeSerializer serializer = new JsonSerializer(); - private readonly RedisClient client; - - public IRedisClient RedisClient - { - get { return client; } - } - - public IRedisNativeClient NativeClient - { - get { return client; } - } - - /// - /// Use this to share the same redis connection with another - /// - /// The client. - public RedisTypedClient(RedisClient client) - { - this.client = client; - this.Lists = new RedisClientLists(this); - this.Sets = new RedisClientSets(this); - this.SortedSets = new RedisClientSortedSets(this); - - this.SequenceKey = client.GetTypeSequenceKey(); - this.TypeIdsSetKey = client.GetTypeIdsSetKey(); - this.TypeLockKey = string.Concat(client.NamespacePrefix, "lock:", typeof(T).Name); + /// + /// Allows you to get Redis value operations to operate against POCO types. + /// + /// + public partial class RedisTypedClient + : IRedisTypedClient + { + static RedisTypedClient() + { + Redis.RedisClient.__uniqueTypes.Add(typeof(T)); + } + + readonly ITypeSerializer serializer = new JsonSerializer(); + private readonly RedisClient client; + + public IRedisClient RedisClient => client; + + public IRedisNativeClient NativeClient => client; + + /// + /// Use this to share the same redis connection with another + /// + /// The client. + public RedisTypedClient(RedisClient client) + { + this.client = client; + this.Lists = new RedisClientLists(this); + this.Sets = new RedisClientSets(this); + this.SortedSets = new RedisClientSortedSets(this); + + this.SequenceKey = client.GetTypeSequenceKey(); + this.TypeIdsSetKey = client.GetTypeIdsSetKey(); + this.TypeLockKey = string.Concat(client.NamespacePrefix, "lock:", typeof(T).Name); this.RecentSortedSetKey = string.Concat(client.NamespacePrefix, "recent:", typeof(T).Name); - } + } private readonly string RecentSortedSetKey; - public string TypeIdsSetKey { get; set; } - public string TypeLockKey { get; set; } + public string TypeIdsSetKey { get; set; } + public string TypeLockKey { get; set; } - public IRedisTypedTransaction CreateTransaction() - { - return new RedisTypedTransaction(this); - } + public IRedisTypedTransaction CreateTransaction() + { + return new RedisTypedTransaction(this, false); + } public IRedisTypedPipeline CreatePipeline() { return new RedisTypedPipeline(this); } - public IDisposable AcquireLock() - { - return client.AcquireLock(this.TypeLockKey); - } - - public IDisposable AcquireLock(TimeSpan timeOut) - { - return client.AcquireLock(this.TypeLockKey, timeOut); - } - - public IRedisTransactionBase Transaction - { - get - { - return client.Transaction; - } - set - { - client.Transaction = value; - } - } + public IDisposable AcquireLock() + { + return client.AcquireLock(this.TypeLockKey); + } + + public IDisposable AcquireLock(TimeSpan timeOut) + { + return client.AcquireLock(this.TypeLockKey, timeOut); + } + + public IRedisTransactionBase Transaction + { + get => client.Transaction; + set => client.Transaction = value; + } public IRedisPipelineShared Pipeline { - get - { - return client.Pipeline; - } - set - { - client.Pipeline = value; - } + get => client.Pipeline; + set => client.Pipeline = value; } public void Watch(params string[] keys) @@ -117,244 +102,259 @@ public void UnWatch() client.UnWatch(); } - public void Multi() - { - this.client.Multi(); - } - - public void Discard() - { - this.client.Discard(); - } - - public void Exec() - { - client.Exec(); - } - - internal void AddTypeIdsRegisteredDuringPipeline() - { 
- client.AddTypeIdsRegisteredDuringPipeline(); - } - - internal void ClearTypeIdsRegisteredDuringPipeline() - { - client.ClearTypeIdsRegisteredDuringPipeline(); - } - - public List GetAllKeys() - { - return client.GetAllKeys(); - } - - public IRedisSet TypeIdsSet - { - get - { - return new RedisClientSet(client, client.GetTypeIdsSetKey()); - } - } - - public T this[string key] - { - get { return GetValue(key); } - set { SetEntry(key, value); } - } - - public byte[] SerializeValue(T value) - { - var strValue = serializer.SerializeToString(value); - return Encoding.UTF8.GetBytes(strValue); - } - - public T DeserializeValue(byte[] value) - { - var strValue = value != null ? Encoding.UTF8.GetString(value) : null; - return serializer.DeserializeFromString(strValue); - } - - public void SetEntry(string key, T value) - { - if (key == null) - throw new ArgumentNullException("key"); - - client.Set(key, SerializeValue(value)); - client.RegisterTypeId(value); - } - - public void SetEntry(string key, T value, TimeSpan expireIn) - { - if (key == null) - throw new ArgumentNullException("key"); - - client.Set(key, SerializeValue(value), expireIn); - client.RegisterTypeId(value); - } - - public bool SetEntryIfNotExists(string key, T value) - { - var success = client.SetNX(key, SerializeValue(value)) == RedisNativeClient.Success; - if (success) client.RegisterTypeId(value); - return success; - } - - public T GetValue(string key) - { - return DeserializeValue(client.Get(key)); - } - - public T GetAndSetValue(string key, T value) - { - return DeserializeValue(client.GetSet(key, SerializeValue(value))); - } - - public bool ContainsKey(string key) - { - return client.Exists(key) == RedisNativeClient.Success; - } - - public bool RemoveEntry(string key) - { - return client.Del(key) == RedisNativeClient.Success; - } - - public bool RemoveEntry(params string[] keys) - { - return client.Del(keys) == RedisNativeClient.Success; - } - - public bool RemoveEntry(params IHasStringId[] entities) - { - var ids = entities.ConvertAll(x => x.Id); - var success = client.Del(ids.ToArray()) == RedisNativeClient.Success; - if (success) client.RemoveTypeIds(ids.ToArray()); - return success; - } - - public long IncrementValue(string key) - { - return client.Incr(key); - } - - public long IncrementValueBy(string key, int count) - { - return client.IncrBy(key, count); - } - - public long DecrementValue(string key) - { - return client.Decr(key); - } - - public long DecrementValueBy(string key, int count) - { - return client.DecrBy(key, count); - } - - public string SequenceKey { get; set; } - - public void SetSequence(int value) - { - client.GetSet(SequenceKey, Encoding.UTF8.GetBytes(value.ToString())); - } - - public long GetNextSequence() - { - return IncrementValue(SequenceKey); - } - - public long GetNextSequence(int incrBy) - { - return IncrementValueBy(SequenceKey, incrBy); - } - - public RedisKeyType GetEntryType(string key) - { - return client.GetEntryType(key); - } - - public string GetRandomKey() - { - return client.RandomKey(); - } - - public bool ExpireEntryIn(string key, TimeSpan expireIn) - { - return client.Expire(key, (int)expireIn.TotalSeconds); - } - - public bool ExpireEntryAt(string key, DateTime expireAt) - { - return client.ExpireAt(key, expireAt.ToUnixTime()); - } - - public bool ExpireIn(object id, TimeSpan expireIn) - { - var key = client.UrnKey(id); - return client.Expire(key, (int)expireIn.TotalSeconds); - } - - public bool ExpireAt(object id, DateTime expireAt) - { - var key = client.UrnKey(id); - return 
client.ExpireAt(key, expireAt.ToUnixTime()); - } - - public TimeSpan GetTimeToLive(string key) - { - return TimeSpan.FromSeconds(client.Ttl(key)); - } - - public void Save() - { - client.Save(); - } - - public void SaveAsync() - { - client.SaveAsync(); - } - - public void FlushDb() - { - client.FlushDb(); - } - - public void FlushAll() - { - client.FlushAll(); - } - - public T[] SearchKeys(string pattern) - { - var strKeys = client.SearchKeys(pattern); - var keysCount = strKeys.Count; - - var keys = new T[keysCount]; - for (var i=0; i < keysCount; i++) - { - keys[i] = serializer.DeserializeFromString(strKeys[i]); - } - return keys; - } - - public List GetValues(List keys) - { - if (keys.IsNullOrEmpty()) return new List(); - - var resultBytesArray = client.MGet(keys.ToArray()); - - var results = new List(); - foreach (var resultBytes in resultBytesArray) - { - if (resultBytes == null) continue; - - var result = DeserializeValue(resultBytes); - results.Add(result); - } - - return results; - } + public void Multi() + { + this.client.Multi(); + } + + public void Discard() + { + this.client.Discard(); + } + + public void Exec() + { + client.Exec(); + } + + internal void AddTypeIdsRegisteredDuringPipeline() + { + client.AddTypeIdsRegisteredDuringPipeline(); + } + + internal void ClearTypeIdsRegisteredDuringPipeline() + { + client.ClearTypeIdsRegisteredDuringPipeline(); + } + + public List GetAllKeys() + { + return client.GetAllKeys(); + } + + public string UrnKey(T entity) + { + return client.UrnKey(entity); + } + + public IRedisSet TypeIdsSet => TypeIdsSetRaw; + + private RedisClientSet TypeIdsSetRaw => new RedisClientSet(client, client.GetTypeIdsSetKey()); + + public T this[string key] + { + get => GetValue(key); + set => SetValue(key, value); + } + + public byte[] SerializeValue(T value) + { + var strValue = serializer.SerializeToString(value); + return Encoding.UTF8.GetBytes(strValue); + } + + public T DeserializeValue(byte[] value) + { + var strValue = value != null ? 
Encoding.UTF8.GetString(value) : null; + return serializer.DeserializeFromString(strValue); + } + + public void SetValue(string key, T entity) + { + if (key == null) + throw new ArgumentNullException(nameof(key)); + + client.Set(key, SerializeValue(entity)); + client.RegisterTypeId(entity); + } + + public void SetValue(string key, T entity, TimeSpan expireIn) + { + if (key == null) + throw new ArgumentNullException(nameof(key)); + + client.Set(key, SerializeValue(entity), expireIn); + client.RegisterTypeId(entity); + } + + public bool SetValueIfNotExists(string key, T entity) + { + var success = client.SetNX(key, SerializeValue(entity)) == RedisNativeClient.Success; + if (success) client.RegisterTypeId(entity); + return success; + } + + public bool SetValueIfExists(string key, T entity) + { + var success = client.Set(key, SerializeValue(entity), exists:true); + if (success) client.RegisterTypeId(entity); + return success; + } + + public T GetValue(string key) + { + return DeserializeValue(client.Get(key)); + } + + public T GetAndSetValue(string key, T value) + { + return DeserializeValue(client.GetSet(key, SerializeValue(value))); + } + + public bool ContainsKey(string key) + { + return client.Exists(key) == RedisNativeClient.Success; + } + + public bool RemoveEntry(string key) + { + return client.Del(key) == RedisNativeClient.Success; + } + + public bool RemoveEntry(params string[] keys) + { + return client.Del(keys) == RedisNativeClient.Success; + } + + public bool RemoveEntry(params IHasStringId[] entities) + { + var ids = entities.Map(x => x.Id); + var success = client.Del(ids.ToArray()) == RedisNativeClient.Success; + if (success) client.RemoveTypeIdsByValues(ids.ToArray()); + return success; + } + + public long IncrementValue(string key) + { + return client.Incr(key); + } + + public long IncrementValueBy(string key, int count) + { + return client.IncrBy(key, count); + } + + public long DecrementValue(string key) + { + return client.Decr(key); + } + + public long DecrementValueBy(string key, int count) + { + return client.DecrBy(key, count); + } + + public string SequenceKey { get; set; } + + public void SetSequence(int value) + { + client.GetSet(SequenceKey, Encoding.UTF8.GetBytes(value.ToString())); + } + + public long GetNextSequence() + { + return IncrementValue(SequenceKey); + } + + public long GetNextSequence(int incrBy) + { + return IncrementValueBy(SequenceKey, incrBy); + } + + public RedisKeyType GetEntryType(string key) + { + return client.GetEntryType(key); + } + + public string GetRandomKey() + { + return client.RandomKey(); + } + + public bool ExpireEntryIn(string key, TimeSpan expireIn) + { + return client.ExpireEntryIn(key, expireIn); + } + + public bool ExpireEntryAt(string key, DateTime expireAt) + { + return client.ExpireEntryAt(key, expireAt); + } + + public bool ExpireIn(object id, TimeSpan expireIn) + { + var key = client.UrnKey(id); + return client.ExpireEntryIn(key, expireIn); + } + + public bool ExpireAt(object id, DateTime expireAt) + { + var key = client.UrnKey(id); + return client.ExpireEntryAt(key, expireAt); + } + + public TimeSpan GetTimeToLive(string key) + { + return TimeSpan.FromSeconds(client.Ttl(key)); + } + + public void Save() + { + client.Save(); + } + + public void SaveAsync() + { + client.SaveAsync(); + } + + public void FlushDb() + { + client.FlushDb(); + } + + public void FlushAll() + { + client.FlushAll(); + } + + public T[] SearchKeys(string pattern) + { + var strKeys = client.SearchKeys(pattern); + return SearchKeysParse(strKeys); + } + 
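// Shared parse step: deserializes each matching key string back into T, so both the synchronous SearchKeys above and the async SearchKeysAsync implementation can reuse it. +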
private T[] SearchKeysParse(List strKeys) + { + var keysCount = strKeys.Count; + + var keys = new T[keysCount]; + for (var i = 0; i < keysCount; i++) + { + keys[i] = serializer.DeserializeFromString(strKeys[i]); + } + return keys; + } + + public List GetValues(List keys) + { + if (keys.IsNullOrEmpty()) return new List(); + + var resultBytesArray = client.MGet(keys.ToArray()); + return ProcessGetValues(resultBytesArray); + } + private List ProcessGetValues(byte[][] resultBytesArray) + { + var results = new List(); + foreach (var resultBytes in resultBytesArray) + { + if (resultBytes == null) continue; + + var result = DeserializeValue(resultBytes); + results.Add(result); + } + + return results; + } public void StoreAsHash(T entity) { @@ -367,98 +367,129 @@ public T GetFromHash(object id) } - #region Implementation of IBasicPersistenceProvider + #region Implementation of IBasicPersistenceProvider - public T GetById(object id) - { - var key = client.UrnKey(id); - return this.GetValue(key); - } + public T GetById(object id) + { + var key = client.UrnKey(id); + return this.GetValue(key); + } - public IList GetByIds(IEnumerable ids) - { - if (ids != null) - { - var urnKeys = ids.ConvertAll(x => client.UrnKey(x)); - if (urnKeys.Count != 0) - return GetValues(urnKeys); - } + public IList GetByIds(IEnumerable ids) + { + if (ids != null) + { + var urnKeys = ids.Map(x => client.UrnKey(x)); + if (urnKeys.Count != 0) + return GetValues(urnKeys); + } - return new List(); - } + return new List(); + } - public IList GetAll() - { - var allKeys = client.GetAllItemsFromSet(this.TypeIdsSetKey); - return this.GetByIds(allKeys.ToArray()); - } + public IList GetAll() + { + var allKeys = client.GetAllItemsFromSet(this.TypeIdsSetKey); + return this.GetByIds(allKeys.ToArray()); + } - public T Store(T entity) - { + public T Store(T entity) + { var urnKey = client.UrnKey(entity); - this.SetEntry(urnKey, entity); + this.SetValue(urnKey, entity); + return entity; + } - return entity; - } + public T Store(T entity, TimeSpan expireIn) + { + var urnKey = client.UrnKey(entity); + this.SetValue(urnKey, entity, expireIn); + return entity; + } - public void StoreAll(IEnumerable entities) - { - if (entities == null) return; + public void StoreAll(IEnumerable entities) + { + if (PrepareStoreAll(entities, out var keys, out var values, out var entitiesList)) + { + client.MSet(keys, values); + client.RegisterTypeIds(entitiesList); + } + } - var entitiesList = entities.ToList(); - var len = entitiesList.Count; + private bool PrepareStoreAll(IEnumerable entities, out byte[][] keys, out byte[][] values, out List entitiesList) + { + if (entities == null) + { + keys = values = default; + entitiesList = default; + return false; + } - var keys = new byte[len][]; - var values = new byte[len][]; + entitiesList = entities.ToList(); + var len = entitiesList.Count; - for (var i = 0; i < len; i++) - { - keys[i] = client.UrnKey(entitiesList[i]).ToUtf8Bytes(); - values[i] = Redis.RedisClient.SerializeToUtf8Bytes(entitiesList[i]); - } + keys = new byte[len][]; + values = new byte[len][]; - client.MSet(keys, values); - client.RegisterTypeIds(entitiesList); - } + for (var i = 0; i < len; i++) + { + keys[i] = client.UrnKey(entitiesList[i]).ToUtf8Bytes(); + values[i] = Redis.RedisClient.SerializeToUtf8Bytes(entitiesList[i]); + } + return true; + } - public void Delete(T entity) - { + public void Delete(T entity) + { var urnKey = client.UrnKey(entity); - this.RemoveEntry(urnKey); - client.RemoveTypeIds(entity); - } + this.RemoveEntry(urnKey); + 
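// Also remove the entity's id from the type-ids index set so GetAll() no longer returns the deleted entity. +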
client.RemoveTypeIdsByValue(entity); + } - public void DeleteById(object id) - { - var urnKey = client.UrnKey(id); + public void DeleteById(object id) + { + var urnKey = client.UrnKey(id); - this.RemoveEntry(urnKey); - client.RemoveTypeIds(id.ToString()); - } + this.RemoveEntry(urnKey); + client.RemoveTypeIdsById(id.ToString()); + } - public void DeleteByIds(IEnumerable ids) - { - if (ids == null) return; + public void DeleteByIds(IEnumerable ids) + { + if (ids == null) return; - var urnKeys = ids.ConvertAll(t => client.UrnKey(t)); - if (urnKeys.Count > 0) + var idStrings = ids.Map(x => x.ToString()).ToArray(); + var urnKeys = idStrings.Select(t => client.UrnKey(t)).ToArray(); + if (urnKeys.Length > 0) { - this.RemoveEntry(urnKeys.ToArray()); - client.RemoveTypeIds(ids.ConvertAll(x => x.ToString()).ToArray()); + this.RemoveEntry(urnKeys); + client.RemoveTypeIdsByIds(idStrings); } - } + } - public void DeleteAll() - { - var urnKeys = client.GetAllItemsFromSet(this.TypeIdsSetKey); - if (urnKeys.Count > 0) + private void DeleteAll(ulong cursor, int pageSize) + { + do { - this.RemoveEntry(urnKeys.ToArray()); - this.RemoveEntry(this.TypeIdsSetKey); - } - } + var scanResult = client.SScan(this.TypeIdsSetKey, cursor, pageSize); + cursor = scanResult.Cursor; + var ids = scanResult.Results.Select(x => Encoding.UTF8.GetString(x)).ToList(); + var urnKeys = ids.Map(t => client.UrnKey(t)); + if (urnKeys.Count > 0) + { + this.RemoveEntry(urnKeys.ToArray()); + } + } while (cursor != 0); + + this.RemoveEntry(this.TypeIdsSetKey); + } + + public void DeleteAll() + { + DeleteAll(0,RedisConfig.CommandKeysBatchSize); + } - #endregion + #endregion internal void ExpectQueued() { @@ -474,14 +505,15 @@ internal int ReadMultiDataResultCount() } public void FlushSendBuffer() { - client.FlushSendBuffer(); + client.FlushAndResetSendBuffer(); } public void ResetSendBuffer() { client.ResetSendBuffer(); } - - [Obsolete("Does nothing currently, RedisTypedClient will not be IDisposable in a future version")] - public void Dispose() {} - } + internal void EndPipeline() + { + client.EndPipeline(); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_App.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_App.cs index ee319379..6a7a1e83 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient_App.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_App.cs @@ -1,89 +1,85 @@ using System; using System.Collections.Generic; using System.Linq; -using ServiceStack.Common.Utils; using ServiceStack.Text; namespace ServiceStack.Redis.Generic { - public partial class RedisTypedClient - { - private string GetChildReferenceSetKey(object parentId) - { - return string.Concat(client.NamespacePrefix, "ref:", typeof(T).Name, "/", typeof(TChild).Name, ":", parentId); - } + public partial class RedisTypedClient + { + private string GetChildReferenceSetKey(object parentId) + { + return string.Concat(client.NamespacePrefix, "ref:", typeof(T).Name, "/", typeof(TChild).Name, ":", parentId); + } - public void StoreRelatedEntities(object parentId, List children) - { - var childRefKey = GetChildReferenceSetKey(parentId); + public void StoreRelatedEntities(object parentId, List children) + { + var childRefKey = GetChildReferenceSetKey(parentId); var childKeys = children.ConvertAll(x => client.UrnKey(x)); - using (var trans = client.CreateTransaction()) - { - //Ugly but need access to a generic constraint-free StoreAll method - trans.QueueCommand(c => ((RedisClient)c)._StoreAll(children)); - 
trans.QueueCommand(c => c.AddRangeToSet(childRefKey, childKeys)); + using (var trans = client.CreateTransaction()) + { + //Ugly but need access to a generic constraint-free StoreAll method + trans.QueueCommand(c => ((RedisClient)c)._StoreAll(children)); + trans.QueueCommand(c => c.AddRangeToSet(childRefKey, childKeys)); - trans.Commit(); - } - } + trans.Commit(); + } + } - public void StoreRelatedEntities(object parentId, params TChild[] children) - { - StoreRelatedEntities(parentId, new List(children)); - } + public void StoreRelatedEntities(object parentId, params TChild[] children) + { + StoreRelatedEntities(parentId, new List(children)); + } - public void DeleteRelatedEntity(object parentId, object childId) - { - var childRefKey = GetChildReferenceSetKey(parentId); + public void DeleteRelatedEntity(object parentId, object childId) + { + var childRefKey = GetChildReferenceSetKey(parentId); - client.RemoveItemFromSet(childRefKey, TypeSerializer.SerializeToString(childId)); - } + client.RemoveItemFromSet(childRefKey, TypeSerializer.SerializeToString(childId)); + } - public void DeleteRelatedEntities(object parentId) - { - var childRefKey = GetChildReferenceSetKey(parentId); - client.Remove(childRefKey); - } + public void DeleteRelatedEntities(object parentId) + { + var childRefKey = GetChildReferenceSetKey(parentId); + client.Remove(childRefKey); + } - public List GetRelatedEntities(object parentId) - { - var childRefKey = GetChildReferenceSetKey(parentId); - var childKeys = client.GetAllItemsFromSet(childRefKey).ToList(); + public List GetRelatedEntities(object parentId) + { + var childRefKey = GetChildReferenceSetKey(parentId); + var childKeys = client.GetAllItemsFromSet(childRefKey).ToList(); - using (var typedClient = client.GetTypedClient()) - { - return typedClient.GetValues(childKeys); - } - } + return client.As().GetValues(childKeys); + } - public int GetRelatedEntitiesCount(object parentId) - { - var childRefKey = GetChildReferenceSetKey(parentId); - return client.GetSetCount(childRefKey); - } + public long GetRelatedEntitiesCount(object parentId) + { + var childRefKey = GetChildReferenceSetKey(parentId); + return client.GetSetCount(childRefKey); + } - public void AddToRecentsList(T value) - { + public void AddToRecentsList(T value) + { var key = client.UrnKey(value); - var nowScore = DateTime.UtcNow.ToUnixTime(); - client.AddItemToSortedSet(RecentSortedSetKey, key, nowScore); - } + var nowScore = DateTime.UtcNow.ToUnixTime(); + client.AddItemToSortedSet(RecentSortedSetKey, key, nowScore); + } - public List GetLatestFromRecentsList(int skip, int take) - { - var toRank = take - 1; - var keys = client.GetRangeFromSortedSetDesc(RecentSortedSetKey, skip, toRank); - var values = GetValues(keys); - return values; - } + public List GetLatestFromRecentsList(int skip, int take) + { + var toRank = take - 1; + var keys = client.GetRangeFromSortedSetDesc(RecentSortedSetKey, skip, toRank); + var values = GetValues(keys); + return values; + } - public List GetEarliestFromRecentsList(int skip, int take) - { - var toRank = take - 1; - var keys = client.GetRangeFromSortedSet(RecentSortedSetKey, skip, toRank); - var values = GetValues(keys); - return values; - } - } + public List GetEarliestFromRecentsList(int skip, int take) + { + var toRank = take - 1; + var keys = client.GetRangeFromSortedSet(RecentSortedSetKey, skip, toRank); + var values = GetValues(keys); + return values; + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs 
b/src/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs index cfbbd3f3..e60432d0 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -17,76 +17,76 @@ namespace ServiceStack.Redis.Generic { - public partial class RedisTypedClient - { - public IRedisHash GetHash(string hashId) - { - return new RedisClientHash(this, hashId); - } + public partial class RedisTypedClient + { + public IRedisHash GetHash(string hashId) + { + return new RedisClientHash(this, hashId); + } - public bool HashContainsEntry(IRedisHash hash, TKey key) - { - return client.HashContainsEntry(hash.Id, key.SerializeToString()); - } + public bool HashContainsEntry(IRedisHash hash, TKey key) + { + return client.HashContainsEntry(hash.Id, key.SerializeToString()); + } - public bool SetEntryInHash(IRedisHash hash, TKey key, T value) - { - return client.SetEntryInHash(hash.Id, key.SerializeToString(), value.SerializeToString()); - } + public bool SetEntryInHash(IRedisHash hash, TKey key, T value) + { + return client.SetEntryInHash(hash.Id, key.SerializeToString(), value.SerializeToString()); + } - public bool SetEntryInHashIfNotExists(IRedisHash hash, TKey key, T value) - { - return client.SetEntryInHashIfNotExists(hash.Id, key.SerializeToString(), value.SerializeToString()); - } + public bool SetEntryInHashIfNotExists(IRedisHash hash, TKey key, T value) + { + return client.SetEntryInHashIfNotExists(hash.Id, key.SerializeToString(), value.SerializeToString()); + } - public void SetRangeInHash(IRedisHash hash, IEnumerable> keyValuePairs) - { - var stringKeyValuePairs = keyValuePairs.ToList().ConvertAll( - x => new KeyValuePair(x.Key.SerializeToString(), x.Value.SerializeToString())); + public void SetRangeInHash(IRedisHash hash, IEnumerable> keyValuePairs) + { + var stringKeyValuePairs = keyValuePairs.ToList().ConvertAll( + x => new KeyValuePair(x.Key.SerializeToString(), x.Value.SerializeToString())); - client.SetRangeInHash(hash.Id, stringKeyValuePairs); - } + client.SetRangeInHash(hash.Id, stringKeyValuePairs); + } - public T GetValueFromHash(IRedisHash hash, TKey key) - { - return DeserializeFromString( - client.GetValueFromHash(hash.Id, key.SerializeToString())); - } + public T GetValueFromHash(IRedisHash hash, TKey key) + { + return DeserializeFromString( + client.GetValueFromHash(hash.Id, key.SerializeToString())); + } - public bool RemoveEntryFromHash(IRedisHash hash, TKey key) - { - return client.RemoveEntryFromHash(hash.Id, key.SerializeToString()); - } + public bool RemoveEntryFromHash(IRedisHash hash, TKey key) + { + return client.RemoveEntryFromHash(hash.Id, key.SerializeToString()); + } - public int GetHashCount(IRedisHash hash) - { - return client.GetHashCount(hash.Id); - } + public long GetHashCount(IRedisHash hash) + { + return client.GetHashCount(hash.Id); + } - public List GetHashKeys(IRedisHash hash) - { - return client.GetHashKeys(hash.Id).ConvertEachTo(); - } + public List GetHashKeys(IRedisHash hash) + { + 
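// Hash field names are stored as serialized TKey values (see SetEntryInHash above), so convert each one back to TKey here. +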
return client.GetHashKeys(hash.Id).ConvertEachTo(); + } - public List GetHashValues(IRedisHash hash) - { - return client.GetHashValues(hash.Id).ConvertEachTo(); - } + public List GetHashValues(IRedisHash hash) + { + return client.GetHashValues(hash.Id).ConvertEachTo(); + } - public Dictionary GetAllEntriesFromHash(IRedisHash hash) - { - return ConvertEachTo(client.GetAllEntriesFromHash(hash.Id)); - } + public Dictionary GetAllEntriesFromHash(IRedisHash hash) + { + return ConvertEachTo(client.GetAllEntriesFromHash(hash.Id)); + } - public static Dictionary ConvertEachTo(IDictionary map) - { - var to = new Dictionary(); - foreach (var item in map) - { - to[JsonSerializer.DeserializeFromString(item.Key)] - = JsonSerializer.DeserializeFromString(item.Value); - } - return to; - } - } + public static Dictionary ConvertEachTo(IDictionary map) + { + var to = new Dictionary(); + foreach (var item in map) + { + to[JsonSerializer.DeserializeFromString(item.Key)] + = JsonSerializer.DeserializeFromString(item.Value); + } + return to; + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_List.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_List.Async.cs new file mode 100644 index 00000000..023590f3 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_List.Async.cs @@ -0,0 +1,33 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Model; +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + public partial class RedisTypedClient + { + internal partial class RedisClientLists + : IHasNamed> + { + IRedisListAsync IHasNamed>.this[string listId] + { + get => new RedisClientList(client, listId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_List.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_List.cs index dfb1d3f5..144099e2 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient_List.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_List.cs @@ -1,212 +1,213 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. 
// using System; using System.Collections.Generic; using System.Linq; -using ServiceStack.DesignPatterns.Model; +using System.Threading; +using ServiceStack.Model; namespace ServiceStack.Redis.Generic { - public partial class RedisTypedClient - { - const int FirstElement = 0; - const int LastElement = -1; - - public IHasNamed> Lists { get; set; } - - internal class RedisClientLists - : IHasNamed> - { - private readonly RedisTypedClient client; - - public RedisClientLists(RedisTypedClient client) - { - this.client = client; - } - - public IRedisList this[string listId] - { - get - { - return new RedisClientList(client, listId); - } - set - { - var list = this[listId]; - list.Clear(); - list.CopyTo(value.ToArray(), 0); - } - } - } - - private List CreateList(byte[][] multiDataList) - { - if (multiDataList == null) return new List(); - - var results = new List(); - foreach (var multiData in multiDataList) - { - results.Add(DeserializeValue(multiData)); - } - return results; - } - - public List GetAllItemsFromList(IRedisList fromList) - { - var multiDataList = client.LRange(fromList.Id, FirstElement, LastElement); - return CreateList(multiDataList); - } - - public List GetRangeFromList(IRedisList fromList, int startingFrom, int endingAt) - { - var multiDataList = client.LRange(fromList.Id, startingFrom, endingAt); - return CreateList(multiDataList); - } - - public List SortList(IRedisList fromList, int startingFrom, int endingAt) - { - var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; - var multiDataList = client.Sort(fromList.Id, sortOptions); - return CreateList(multiDataList); - } - - public void AddItemToList(IRedisList fromList, T value) - { - client.RPush(fromList.Id, SerializeValue(value)); - } - - //TODO: replace it with a pipeline implementation ala AddRangeToSet - public void AddRangeToList(IRedisList fromList, IEnumerable values) - { - foreach (var value in values) - { - AddItemToList(fromList, value); - } - } - - public void PrependItemToList(IRedisList fromList, T value) - { - client.LPush(fromList.Id, SerializeValue(value)); - } - - public T RemoveStartFromList(IRedisList fromList) - { - return DeserializeValue(client.LPop(fromList.Id)); - } - - public T BlockingRemoveStartFromList(IRedisList fromList, TimeSpan? 
timeOut) - { - var unblockingKeyAndValue = client.BLPop(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds); + public partial class RedisTypedClient + { + const int FirstElement = 0; + const int LastElement = -1; + + public IHasNamed> Lists { get; set; } + + internal partial class RedisClientLists + : IHasNamed> + { + private readonly RedisTypedClient client; + + public RedisClientLists(RedisTypedClient client) + { + this.client = client; + } + + public IRedisList this[string listId] + { + get + { + return new RedisClientList(client, listId); + } + set + { + var list = this[listId]; + list.Clear(); + list.CopyTo(value.ToArray(), 0); + } + } + } + + private List CreateList(byte[][] multiDataList) + { + if (multiDataList == null) return new List(); + + var results = new List(); + foreach (var multiData in multiDataList) + { + results.Add(DeserializeValue(multiData)); + } + return results; + } + + public List GetAllItemsFromList(IRedisList fromList) + { + var multiDataList = client.LRange(fromList.Id, FirstElement, LastElement); + return CreateList(multiDataList); + } + + public List GetRangeFromList(IRedisList fromList, int startingFrom, int endingAt) + { + var multiDataList = client.LRange(fromList.Id, startingFrom, endingAt); + return CreateList(multiDataList); + } + + public List SortList(IRedisList fromList, int startingFrom, int endingAt) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; + var multiDataList = client.Sort(fromList.Id, sortOptions); + return CreateList(multiDataList); + } + + public void AddItemToList(IRedisList fromList, T value) + { + client.RPush(fromList.Id, SerializeValue(value)); + } + + //TODO: replace it with a pipeline implementation ala AddRangeToSet + public void AddRangeToList(IRedisList fromList, IEnumerable values) + { + foreach (var value in values) + { + AddItemToList(fromList, value); + } + } + + public void PrependItemToList(IRedisList fromList, T value) + { + client.LPush(fromList.Id, SerializeValue(value)); + } + + public T RemoveStartFromList(IRedisList fromList) + { + return DeserializeValue(client.LPop(fromList.Id)); + } + + public T BlockingRemoveStartFromList(IRedisList fromList, TimeSpan? timeOut) + { + var unblockingKeyAndValue = client.BLPop(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds); return unblockingKeyAndValue.Length == 0 ? 
default(T) : DeserializeValue(unblockingKeyAndValue[1]); - } - - public T RemoveEndFromList(IRedisList fromList) - { - return DeserializeValue(client.RPop(fromList.Id)); - } - - public void RemoveAllFromList(IRedisList fromList) - { - client.LTrim(fromList.Id, int.MaxValue, FirstElement); - } - - public void TrimList(IRedisList fromList, int keepStartingFrom, int keepEndingAt) - { - client.LTrim(fromList.Id, keepStartingFrom, keepEndingAt); - } - - public int RemoveItemFromList(IRedisList fromList, T value) - { - const int removeAll = 0; - return client.LRem(fromList.Id, removeAll, SerializeValue(value)); - } - - public int RemoveItemFromList(IRedisList fromList, T value, int noOfMatches) - { - return client.LRem(fromList.Id, noOfMatches, SerializeValue(value)); - } - - public int GetListCount(IRedisList fromList) - { - return client.LLen(fromList.Id); - } - - public T GetItemFromList(IRedisList fromList, int listIndex) - { - return DeserializeValue(client.LIndex(fromList.Id, listIndex)); - } - - public void SetItemInList(IRedisList toList, int listIndex, T value) - { - client.LSet(toList.Id, listIndex, SerializeValue(value)); - } - - public void InsertBeforeItemInList(IRedisList toList, T pivot, T value) - { + } + + public T RemoveEndFromList(IRedisList fromList) + { + return DeserializeValue(client.RPop(fromList.Id)); + } + + public void RemoveAllFromList(IRedisList fromList) + { + client.LTrim(fromList.Id, int.MaxValue, FirstElement); + } + + public void TrimList(IRedisList fromList, int keepStartingFrom, int keepEndingAt) + { + client.LTrim(fromList.Id, keepStartingFrom, keepEndingAt); + } + + public long RemoveItemFromList(IRedisList fromList, T value) + { + const int removeAll = 0; + return client.LRem(fromList.Id, removeAll, SerializeValue(value)); + } + + public long RemoveItemFromList(IRedisList fromList, T value, int noOfMatches) + { + return client.LRem(fromList.Id, noOfMatches, SerializeValue(value)); + } + + public long GetListCount(IRedisList fromList) + { + return client.LLen(fromList.Id); + } + + public T GetItemFromList(IRedisList fromList, int listIndex) + { + return DeserializeValue(client.LIndex(fromList.Id, listIndex)); + } + + public void SetItemInList(IRedisList toList, int listIndex, T value) + { + client.LSet(toList.Id, listIndex, SerializeValue(value)); + } + + public void InsertBeforeItemInList(IRedisList toList, T pivot, T value) + { client.LInsert(toList.Id, insertBefore: true, pivot: SerializeValue(pivot), value: SerializeValue(value)); - } + } - public void InsertAfterItemInList(IRedisList toList, T pivot, T value) - { + public void InsertAfterItemInList(IRedisList toList, T pivot, T value) + { client.LInsert(toList.Id, insertBefore: false, pivot: SerializeValue(pivot), value: SerializeValue(value)); } - public void EnqueueItemOnList(IRedisList fromList, T item) - { - client.LPush(fromList.Id, SerializeValue(item)); - } + public void EnqueueItemOnList(IRedisList fromList, T item) + { + client.LPush(fromList.Id, SerializeValue(item)); + } - public T DequeueItemFromList(IRedisList fromList) - { - return DeserializeValue(client.RPop(fromList.Id)); - } + public T DequeueItemFromList(IRedisList fromList) + { + return DeserializeValue(client.RPop(fromList.Id)); + } - public T BlockingDequeueItemFromList(IRedisList fromList, TimeSpan? timeOut) - { - var unblockingKeyAndValue = client.BRPop(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds); + public T BlockingDequeueItemFromList(IRedisList fromList, TimeSpan? 
timeOut) + { + var unblockingKeyAndValue = client.BRPop(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds); return unblockingKeyAndValue.Length == 0 ? default(T) : DeserializeValue(unblockingKeyAndValue[1]); - } + } - public void PushItemToList(IRedisList fromList, T item) - { - client.RPush(fromList.Id, SerializeValue(item)); - } + public void PushItemToList(IRedisList fromList, T item) + { + client.RPush(fromList.Id, SerializeValue(item)); + } - public T PopItemFromList(IRedisList fromList) - { - return DeserializeValue(client.RPop(fromList.Id)); - } + public T PopItemFromList(IRedisList fromList) + { + return DeserializeValue(client.RPop(fromList.Id)); + } - public T BlockingPopItemFromList(IRedisList fromList, TimeSpan? timeOut) - { - var unblockingKeyAndValue = client.BRPop(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds); + public T BlockingPopItemFromList(IRedisList fromList, TimeSpan? timeOut) + { + var unblockingKeyAndValue = client.BRPop(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds); return unblockingKeyAndValue.Length == 0 ? default(T) : DeserializeValue(unblockingKeyAndValue[1]); - } + } - public T PopAndPushItemBetweenLists(IRedisList fromList, IRedisList toList) - { - return DeserializeValue(client.RPopLPush(fromList.Id, toList.Id)); - } + public T PopAndPushItemBetweenLists(IRedisList fromList, IRedisList toList) + { + return DeserializeValue(client.RPopLPush(fromList.Id, toList.Id)); + } public T BlockingPopAndPushItemBetweenLists(IRedisList fromList, IRedisList toList, TimeSpan? timeOut) - { + { return DeserializeValue(client.BRPopLPush(fromList.Id, toList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds)); - } - } + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.Async.cs new file mode 100644 index 00000000..24889db6 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis.Generic +{ + public partial class RedisTypedClient + { + internal partial class RedisClientSets + : IHasNamed> + { + IRedisSetAsync IHasNamed>.this[string setId] + { + get => new RedisClientSet(client, setId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs index ce61a136..d8a0510e 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs @@ -1,147 +1,147 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. 
// using System.Collections.Generic; using System.Linq; -using ServiceStack.Common.Extensions; -using ServiceStack.DesignPatterns.Model; +using ServiceStack.Common; +using ServiceStack.Model; namespace ServiceStack.Redis.Generic { - public partial class RedisTypedClient - { - public IHasNamed> Sets { get; set; } - - public int Db - { - get { return client.Db; } - set { client.Db = value; } - } - - internal class RedisClientSets - : IHasNamed> - { - private readonly RedisTypedClient client; - - public RedisClientSets(RedisTypedClient client) - { - this.client = client; - } - - public IRedisSet this[string setId] - { - get - { - return new RedisClientSet(client, setId); - } - set - { - var col = this[setId]; - col.Clear(); - col.CopyTo(value.ToArray(), 0); - } - } - } - - private HashSet CreateHashSet(byte[][] multiDataList) - { - var results = new HashSet(); - foreach (var multiData in multiDataList) - { - results.Add(DeserializeValue(multiData)); - } - return results; - } - - public List GetSortedEntryValues(IRedisSet fromSet, int startingFrom, int endingAt) - { - var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; - var multiDataList = client.Sort(fromSet.Id, sortOptions); - return CreateList(multiDataList); - } - - public HashSet GetAllItemsFromSet(IRedisSet fromSet) - { - var multiDataList = client.SMembers(fromSet.Id); - return CreateHashSet(multiDataList); - } - - public void AddItemToSet(IRedisSet toSet, T item) - { - client.SAdd(toSet.Id, SerializeValue(item)); - } - - public void RemoveItemFromSet(IRedisSet fromSet, T item) - { - client.SRem(fromSet.Id, SerializeValue(item)); - } - - public T PopItemFromSet(IRedisSet fromSet) - { - return DeserializeValue(client.SPop(fromSet.Id)); - } - - public void MoveBetweenSets(IRedisSet fromSet, IRedisSet toSet, T item) - { - client.SMove(fromSet.Id, toSet.Id, SerializeValue(item)); - } - - public int GetSetCount(IRedisSet set) - { - return client.SCard(set.Id); - } - - public bool SetContainsItem(IRedisSet set, T item) - { - return client.SIsMember(set.Id, SerializeValue(item)) == 1; - } - - public HashSet GetIntersectFromSets(params IRedisSet[] sets) - { - var multiDataList = client.SInter(sets.ConvertAll(x => x.Id).ToArray()); - return CreateHashSet(multiDataList); - } - - public void StoreIntersectFromSets(IRedisSet intoSet, params IRedisSet[] sets) - { - client.SInterStore(intoSet.Id, sets.ConvertAll(x => x.Id).ToArray()); - } - - public HashSet GetUnionFromSets(params IRedisSet[] sets) - { - var multiDataList = client.SUnion(sets.ConvertAll(x => x.Id).ToArray()); - return CreateHashSet(multiDataList); - } - - public void StoreUnionFromSets(IRedisSet intoSet, params IRedisSet[] sets) - { - client.SUnionStore(intoSet.Id, sets.ConvertAll(x => x.Id).ToArray()); - } - - public HashSet GetDifferencesFromSet(IRedisSet fromSet, params IRedisSet[] withSets) - { - var multiDataList = client.SDiff(fromSet.Id, withSets.ConvertAll(x => x.Id).ToArray()); - return CreateHashSet(multiDataList); - } - - public void StoreDifferencesFromSet(IRedisSet intoSet, IRedisSet fromSet, params IRedisSet[] withSets) - { - client.SDiffStore(intoSet.Id, fromSet.Id, withSets.ConvertAll(x => x.Id).ToArray()); - } - - public T GetRandomItemFromSet(IRedisSet fromSet) - { - return DeserializeValue(client.SRandMember(fromSet.Id)); - } - - } + public partial class RedisTypedClient + { + public IHasNamed> Sets { get; set; } + + public long Db + { + get { return client.Db; } + set { client.Db = value; } + } + + internal partial class RedisClientSets + 
: IHasNamed> + { + private readonly RedisTypedClient client; + + public RedisClientSets(RedisTypedClient client) + { + this.client = client; + } + + public IRedisSet this[string setId] + { + get + { + return new RedisClientSet(client, setId); + } + set + { + var col = this[setId]; + col.Clear(); + col.CopyTo(value.ToArray(), 0); + } + } + } + + private HashSet CreateHashSet(byte[][] multiDataList) + { + var results = new HashSet(); + foreach (var multiData in multiDataList) + { + results.Add(DeserializeValue(multiData)); + } + return results; + } + + public List GetSortedEntryValues(IRedisSet fromSet, int startingFrom, int endingAt) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; + var multiDataList = client.Sort(fromSet.Id, sortOptions); + return CreateList(multiDataList); + } + + public HashSet GetAllItemsFromSet(IRedisSet fromSet) + { + var multiDataList = client.SMembers(fromSet.Id); + return CreateHashSet(multiDataList); + } + + public void AddItemToSet(IRedisSet toSet, T item) + { + client.SAdd(toSet.Id, SerializeValue(item)); + } + + public void RemoveItemFromSet(IRedisSet fromSet, T item) + { + client.SRem(fromSet.Id, SerializeValue(item)); + } + + public T PopItemFromSet(IRedisSet fromSet) + { + return DeserializeValue(client.SPop(fromSet.Id)); + } + + public void MoveBetweenSets(IRedisSet fromSet, IRedisSet toSet, T item) + { + client.SMove(fromSet.Id, toSet.Id, SerializeValue(item)); + } + + public long GetSetCount(IRedisSet set) + { + return client.SCard(set.Id); + } + + public bool SetContainsItem(IRedisSet set, T item) + { + return client.SIsMember(set.Id, SerializeValue(item)) == 1; + } + + public HashSet GetIntersectFromSets(params IRedisSet[] sets) + { + var multiDataList = client.SInter(sets.Map(x => x.Id).ToArray()); + return CreateHashSet(multiDataList); + } + + public void StoreIntersectFromSets(IRedisSet intoSet, params IRedisSet[] sets) + { + client.SInterStore(intoSet.Id, sets.Map(x => x.Id).ToArray()); + } + + public HashSet GetUnionFromSets(params IRedisSet[] sets) + { + var multiDataList = client.SUnion(sets.Map(x => x.Id).ToArray()); + return CreateHashSet(multiDataList); + } + + public void StoreUnionFromSets(IRedisSet intoSet, params IRedisSet[] sets) + { + client.SUnionStore(intoSet.Id, sets.Map(x => x.Id).ToArray()); + } + + public HashSet GetDifferencesFromSet(IRedisSet fromSet, params IRedisSet[] withSets) + { + var multiDataList = client.SDiff(fromSet.Id, withSets.Map(x => x.Id).ToArray()); + return CreateHashSet(multiDataList); + } + + public void StoreDifferencesFromSet(IRedisSet intoSet, IRedisSet fromSet, params IRedisSet[] withSets) + { + client.SDiffStore(intoSet.Id, fromSet.Id, withSets.Map(x => x.Id).ToArray()); + } + + public T GetRandomItemFromSet(IRedisSet fromSet) + { + return DeserializeValue(client.SRandMember(fromSet.Id)); + } + + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.Async.cs new file mode 100644 index 00000000..ab4ede2e --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
+// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis.Generic +{ + public partial class RedisTypedClient + { + internal partial class RedisClientSortedSets + : IHasNamed> + { + IRedisSortedSetAsync IHasNamed>.this[string setId] + { + get => new RedisClientSortedSet(client, setId); + set => throw new NotSupportedException(); + } + } + } +} diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs index 70257419..03155420 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs @@ -1,283 +1,289 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // -using System; using System.Collections.Generic; -using System.Linq; -using ServiceStack.Common.Utils; -using ServiceStack.DesignPatterns.Model; +using ServiceStack.Model; using ServiceStack.Redis.Support; using ServiceStack.Text; -using ServiceStack.Common.Extensions; namespace ServiceStack.Redis.Generic { - public partial class RedisTypedClient - { - public IHasNamed> SortedSets { get; set; } - - internal class RedisClientSortedSets - : IHasNamed> - { - private readonly RedisTypedClient client; - - public RedisClientSortedSets(RedisTypedClient client) - { - this.client = client; - } - - public IRedisSortedSet this[string setId] - { - get - { - return new RedisClientSortedSet(client, setId); - } - set - { - var col = this[setId]; - col.Clear(); - col.CopyTo(value.ToArray(), 0); - } - } - } - - public static T DeserializeFromString(string serializedObj) - { - return JsonSerializer.DeserializeFromString(serializedObj); - } - - private static IDictionary CreateGenericMap(IDictionary map) - { - var genericMap = new OrderedDictionary(); - foreach (var entry in map) - { - genericMap[DeserializeFromString(entry.Key)] = entry.Value; - } - return genericMap; - } - - public void AddItemToSortedSet(IRedisSortedSet toSet, T value) - { - client.AddItemToSortedSet(toSet.Id, value.SerializeToString()); - } - - public void AddItemToSortedSet(IRedisSortedSet toSet, T value, double score) - { - client.AddItemToSortedSet(toSet.Id, value.SerializeToString(), score); - } - - public bool RemoveItemFromSortedSet(IRedisSortedSet fromSet, T value) - { - return client.RemoveItemFromSortedSet(fromSet.Id, value.SerializeToString()); - } - - public T PopItemWithLowestScoreFromSortedSet(IRedisSortedSet fromSet) - { - return DeserializeFromString( - client.PopItemWithLowestScoreFromSortedSet(fromSet.Id)); - } - - public T PopItemWithHighestScoreFromSortedSet(IRedisSortedSet fromSet) - { - return DeserializeFromString( - client.PopItemWithHighestScoreFromSortedSet(fromSet.Id)); - } - - public bool SortedSetContainsItem(IRedisSortedSet set, T value) - { - return client.SortedSetContainsItem(set.Id, value.SerializeToString()); - } - - public double IncrementItemInSortedSet(IRedisSortedSet set, T value, double incrementBy) - { - return client.IncrementItemInSortedSet(set.Id, value.SerializeToString(), incrementBy); - } - - public int 
GetItemIndexInSortedSet(IRedisSortedSet set, T value) - { - return client.GetItemIndexInSortedSet(set.Id, value.SerializeToString()); - } - - public int GetItemIndexInSortedSetDesc(IRedisSortedSet set, T value) - { - return client.GetItemIndexInSortedSetDesc(set.Id, value.SerializeToString()); - } - - public List GetAllItemsFromSortedSet(IRedisSortedSet set) - { - var list = client.GetAllItemsFromSortedSet(set.Id); - return list.ConvertEachTo(); - } - - public List GetAllItemsFromSortedSetDesc(IRedisSortedSet set) - { - var list = client.GetAllItemsFromSortedSetDesc(set.Id); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSet(IRedisSortedSet set, int fromRank, int toRank) - { - var list = client.GetRangeFromSortedSet(set.Id, fromRank, toRank); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetDesc(IRedisSortedSet set, int fromRank, int toRank) - { - var list = client.GetRangeFromSortedSetDesc(set.Id, fromRank, toRank); - return list.ConvertEachTo(); - } - - public IDictionary GetAllWithScoresFromSortedSet(IRedisSortedSet set) - { - var map = client.GetRangeWithScoresFromSortedSet(set.Id, FirstElement, LastElement); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSet(IRedisSortedSet set, int fromRank, int toRank) - { - var map = client.GetRangeWithScoresFromSortedSet(set.Id, fromRank, toRank); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetDesc(IRedisSortedSet set, int fromRank, int toRank) - { - var map = client.GetRangeWithScoresFromSortedSetDesc(set.Id, fromRank, toRank); - return CreateGenericMap(map); - } - - public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) - { - var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) - { - var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore, skip, take); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore) - { - var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromScore, toScore); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take) - { - var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromScore, toScore, skip, take); - return list.ConvertEachTo(); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) - { - var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? 
take) - { - var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore, skip, take); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore) - { - var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromScore, toScore); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take) - { - var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromScore, toScore, skip, take); - return CreateGenericMap(map); - } - - public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) - { - var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) - { - var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore, skip, take); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore) - { - var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromScore, toScore); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take) - { - var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromScore, toScore, take, skip); - return list.ConvertEachTo(); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) - { - var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) - { - var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore, skip, take); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore) - { - var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromScore, toScore); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? 
take) - { - var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromScore, toScore, skip, take); - return CreateGenericMap(map); - } - - public int RemoveRangeFromSortedSet(IRedisSortedSet set, int minRank, int maxRank) - { - return client.RemoveRangeFromSortedSet(set.Id, minRank, maxRank); - } - - public int RemoveRangeFromSortedSetByScore(IRedisSortedSet set, double fromScore, double toScore) - { - return client.RemoveRangeFromSortedSetByScore(set.Id, fromScore, toScore); - } - - public int GetSortedSetCount(IRedisSortedSet set) - { - return client.GetSortedSetCount(set.Id); - } - - public double GetItemScoreInSortedSet(IRedisSortedSet set, T value) - { - return client.GetItemScoreInSortedSet(set.Id, value.SerializeToString()); - } - - public int StoreIntersectFromSortedSets(IRedisSortedSet intoSetId, params IRedisSortedSet[] setIds) - { - return client.StoreIntersectFromSortedSets(intoSetId.Id, setIds.ConvertAll(x => x.Id).ToArray()); - } - - public int StoreUnionFromSortedSets(IRedisSortedSet intoSetId, params IRedisSortedSet[] setIds) - { - return client.StoreUnionFromSortedSets(intoSetId.Id, setIds.ConvertAll(x => x.Id).ToArray()); - } - } + public partial class RedisTypedClient + { + public IHasNamed> SortedSets { get; set; } + + internal partial class RedisClientSortedSets + : IHasNamed> + { + private readonly RedisTypedClient client; + + public RedisClientSortedSets(RedisTypedClient client) + { + this.client = client; + } + + public IRedisSortedSet this[string setId] + { + get + { + return new RedisClientSortedSet(client, setId); + } + set + { + var col = this[setId]; + col.Clear(); + col.CopyTo(value.ToArray(), 0); + } + } + } + + public static T DeserializeFromString(string serializedObj) + { + return JsonSerializer.DeserializeFromString(serializedObj); + } + + private static IDictionary CreateGenericMap(IDictionary map) + { + var genericMap = new OrderedDictionary(); + foreach (var entry in map) + { + genericMap[DeserializeFromString(entry.Key)] = entry.Value; + } + return genericMap; + } + + public void AddItemToSortedSet(IRedisSortedSet toSet, T value) + { + client.AddItemToSortedSet(toSet.Id, value.SerializeToString()); + } + + public void AddItemToSortedSet(IRedisSortedSet toSet, T value, double score) + { + client.AddItemToSortedSet(toSet.Id, value.SerializeToString(), score); + } + + public bool RemoveItemFromSortedSet(IRedisSortedSet fromSet, T value) + { + return client.RemoveItemFromSortedSet(fromSet.Id, value.SerializeToString()); + } + + public T PopItemWithLowestScoreFromSortedSet(IRedisSortedSet fromSet) + { + return DeserializeFromString( + client.PopItemWithLowestScoreFromSortedSet(fromSet.Id)); + } + + public T PopItemWithHighestScoreFromSortedSet(IRedisSortedSet fromSet) + { + return DeserializeFromString( + client.PopItemWithHighestScoreFromSortedSet(fromSet.Id)); + } + + public bool SortedSetContainsItem(IRedisSortedSet set, T value) + { + return client.SortedSetContainsItem(set.Id, value.SerializeToString()); + } + + public double IncrementItemInSortedSet(IRedisSortedSet set, T value, double incrementBy) + { + return client.IncrementItemInSortedSet(set.Id, value.SerializeToString(), incrementBy); + } + + public long GetItemIndexInSortedSet(IRedisSortedSet set, T value) + { + return client.GetItemIndexInSortedSet(set.Id, value.SerializeToString()); + } + + public long GetItemIndexInSortedSetDesc(IRedisSortedSet set, T value) + { + return client.GetItemIndexInSortedSetDesc(set.Id, value.SerializeToString()); + } + + public List 
GetAllItemsFromSortedSet(IRedisSortedSet set) + { + var list = client.GetAllItemsFromSortedSet(set.Id); + return list.ConvertEachTo(); + } + + public List GetAllItemsFromSortedSetDesc(IRedisSortedSet set) + { + var list = client.GetAllItemsFromSortedSetDesc(set.Id); + return list.ConvertEachTo(); + } + + public List GetRangeFromSortedSet(IRedisSortedSet set, int fromRank, int toRank) + { + var list = client.GetRangeFromSortedSet(set.Id, fromRank, toRank); + return list.ConvertEachTo(); + } + + public List GetRangeFromSortedSetDesc(IRedisSortedSet set, int fromRank, int toRank) + { + var list = client.GetRangeFromSortedSetDesc(set.Id, fromRank, toRank); + return list.ConvertEachTo(); + } + + public IDictionary GetAllWithScoresFromSortedSet(IRedisSortedSet set) + { + var map = client.GetRangeWithScoresFromSortedSet(set.Id, FirstElement, LastElement); + return CreateGenericMap(map); + } + + public IDictionary GetRangeWithScoresFromSortedSet(IRedisSortedSet set, int fromRank, int toRank) + { + var map = client.GetRangeWithScoresFromSortedSet(set.Id, fromRank, toRank); + return CreateGenericMap(map); + } + + public IDictionary GetRangeWithScoresFromSortedSetDesc(IRedisSortedSet set, int fromRank, int toRank) + { + var map = client.GetRangeWithScoresFromSortedSetDesc(set.Id, fromRank, toRank); + return CreateGenericMap(map); + } + + public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) + { + var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore); + return list.ConvertEachTo(); + } + + public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) + { + var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore, skip, take); + return list.ConvertEachTo(); + } + + public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore) + { + var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromScore, toScore); + return list.ConvertEachTo(); + } + + public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take) + { + var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromScore, toScore, skip, take); + return list.ConvertEachTo(); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) + { + var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore); + return CreateGenericMap(map); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) + { + var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore, skip, take); + return CreateGenericMap(map); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore) + { + var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromScore, toScore); + return CreateGenericMap(map); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? 
take) + { + var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromScore, toScore, skip, take); + return CreateGenericMap(map); + } + + public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) + { + var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore); + return list.ConvertEachTo(); + } + + public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) + { + var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore, skip, take); + return list.ConvertEachTo(); + } + + public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore) + { + var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromScore, toScore); + return list.ConvertEachTo(); + } + + public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take) + { + var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromScore, toScore, skip, take); + return list.ConvertEachTo(); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) + { + var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore); + return CreateGenericMap(map); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) + { + var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore, skip, take); + return CreateGenericMap(map); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore) + { + var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromScore, toScore); + return CreateGenericMap(map); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? 
take) + { + var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromScore, toScore, skip, take); + return CreateGenericMap(map); + } + + public long RemoveRangeFromSortedSet(IRedisSortedSet set, int minRank, int maxRank) + { + return client.RemoveRangeFromSortedSet(set.Id, minRank, maxRank); + } + + public long RemoveRangeFromSortedSetByScore(IRedisSortedSet set, double fromScore, double toScore) + { + return client.RemoveRangeFromSortedSetByScore(set.Id, fromScore, toScore); + } + + public long GetSortedSetCount(IRedisSortedSet set) + { + return client.GetSortedSetCount(set.Id); + } + + public double GetItemScoreInSortedSet(IRedisSortedSet set, T value) + { + return client.GetItemScoreInSortedSet(set.Id, value.SerializeToString()); + } + + public long StoreIntersectFromSortedSets(IRedisSortedSet intoSetId, params IRedisSortedSet[] setIds) + { + return client.StoreIntersectFromSortedSets(intoSetId.Id, setIds.Map(x => x.Id).ToArray()); + } + + public long StoreIntersectFromSortedSets(IRedisSortedSet intoSetId, IRedisSortedSet[] setIds, string[] args) + { + return client.StoreIntersectFromSortedSets(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), args); + } + + public long StoreUnionFromSortedSets(IRedisSortedSet intoSetId, params IRedisSortedSet[] setIds) + { + return client.StoreUnionFromSortedSets(intoSetId.Id, setIds.Map(x => x.Id).ToArray()); + } + + public long StoreUnionFromSortedSets(IRedisSortedSet intoSetId, IRedisSortedSet[] setIds, string[] args) + { + return client.StoreUnionFromSortedSets(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), args); + } + } } diff --git a/src/ServiceStack.Redis/Generic/RedisTypedCommandQueue.cs b/src/ServiceStack.Redis/Generic/RedisTypedCommandQueue.cs index 976879f0..00989112 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedCommandQueue.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedCommandQueue.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using System.Linq; using ServiceStack.Text; namespace ServiceStack.Redis.Generic @@ -12,249 +13,292 @@ public class RedisTypedCommandQueue : RedisQueueCompletableOperation { internal readonly RedisTypedClient RedisClient; internal RedisTypedCommandQueue(RedisTypedClient redisClient) - { - RedisClient = redisClient; - - } - - public void QueueCommand(Action> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Action> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Action> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + { + RedisClient = redisClient; + + } + + public void QueueCommand(Action> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Action> command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Action> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { VoidReturnCommand = command, - OnSuccessVoidCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - - public void QueueCommand(Func, int> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, int> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, int> command, Action onSuccessCallback, Action onErrorCallback) - { - 
BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessVoidCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func, int> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, int> command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, int> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { IntReturnCommand = command, - OnSuccessIntCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - - public void QueueCommand(Func, long> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, long> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, long> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessIntCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func, long> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, long> command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, long> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { LongReturnCommand = command, - OnSuccessLongCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - - public void QueueCommand(Func, bool> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, bool> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, bool> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessLongCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func, bool> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, bool> command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, bool> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { BoolReturnCommand = command, - OnSuccessBoolCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - - public void QueueCommand(Func, double> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, double> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, double> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessBoolCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func, double> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, double> command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, double> 
command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { DoubleReturnCommand = command, - OnSuccessDoubleCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - - public void QueueCommand(Func, byte[]> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, byte[]> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, byte[]> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessDoubleCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func, byte[]> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, byte[]> command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, byte[]> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { BytesReturnCommand = command, - OnSuccessBytesCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - - public void QueueCommand(Func, string> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, string> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, string> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessBytesCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func, string> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, string> command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, string> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { StringReturnCommand = command, - OnSuccessStringCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - public void QueueCommand(Func, T> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, T> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, T> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + public void QueueCommand(Func, T> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, T> command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, T> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { ObjectReturnCommand = command, - OnSuccessTypeCallback = x => onSuccessCallback(JsonSerializer.DeserializeFromString(x)), - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - - public void QueueCommand(Func, byte[][]> command) - { - QueueCommand(command, null, null); - } - - public void 
QueueCommand(Func, byte[][]> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, byte[][]> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessTypeCallback = x => onSuccessCallback(JsonSerializer.DeserializeFromString(x)), + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func, byte[][]> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, byte[][]> command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, byte[][]> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { MultiBytesReturnCommand = command, - OnSuccessMultiBytesCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - - public void QueueCommand(Func, List> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, List> command, Action> onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, List> command, Action> onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessMultiBytesCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func, List> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, List> command, Action> onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, List> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { MultiStringReturnCommand = command, - OnSuccessMultiStringCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } - - public void QueueCommand(Func, List> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, List> command, Action> onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, List> command, Action> onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisTypedCommand - { + OnSuccessMultiStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + public void QueueCommand(Func, List> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, List> command, Action> onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, List> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { MultiObjectReturnCommand = command, - OnSuccessMultiTypeCallback = x => onSuccessCallback(x.ConvertAll(y => JsonSerializer.DeserializeFromString(y))), - OnErrorCallback = onErrorCallback - }); - command(RedisClient); - } + OnSuccessMultiTypeCallback = x => onSuccessCallback(x.ConvertAll(y => JsonSerializer.DeserializeFromString(y))), + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func, HashSet> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, HashSet> command, Action> onSuccessCallback) + { + 
QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, HashSet> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + MultiStringReturnCommand = r => command(r).ToList(), + OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToSet()), + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + public void QueueCommand(Func, HashSet> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func, HashSet> command, Action> onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func, HashSet> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + MultiObjectReturnCommand = r => command(r).ToList(), + OnSuccessMultiTypeCallback = x => onSuccessCallback(x.ConvertAll(JsonSerializer.DeserializeFromString).ToSet()), + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } } } diff --git a/src/ServiceStack.Redis/Generic/RedisTypedPipeline.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedPipeline.Async.cs new file mode 100644 index 00000000..2b571235 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedPipeline.Async.cs @@ -0,0 +1,265 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + /// + /// Pipeline for redis typed client + /// + /// + public partial class RedisTypedPipeline + : IRedisTypedPipelineAsync + { + private IRedisTypedPipelineAsync AsAsync() => this; + void IRedisQueueCompletableOperationAsync.CompleteBytesQueuedCommandAsync(Func> bytesReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(bytesReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteDoubleQueuedCommandAsync(Func> doubleReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(doubleReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteIntQueuedCommandAsync(Func> intReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(intReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteLongQueuedCommandAsync(Func> longReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(longReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteMultiBytesQueuedCommandAsync(Func> multiBytesReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(multiBytesReadCommand); + AddCurrentQueuedOperation(); + } + + void 
IRedisQueueCompletableOperationAsync.CompleteMultiStringQueuedCommandAsync(Func>> multiStringReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(multiStringReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteRedisDataQueuedCommandAsync(Func> redisDataReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(redisDataReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteStringQueuedCommandAsync(Func> stringReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(stringReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteVoidQueuedCommandAsync(Func voidReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(voidReadCommand); + AddCurrentQueuedOperation(); + } + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + async ValueTask IRedisPipelineSharedAsync.FlushAsync(CancellationToken token) + { + try + { + // flush send buffers + await RedisClient.FlushSendBufferAsync(token).ConfigureAwait(false); + RedisClient.ResetSendBuffer(); + + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + + } + finally + { + ClosePipeline(); + await RedisClient.AddTypeIdsRegisteredDuringPipelineAsync(token).ConfigureAwait(false); + } + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessVoidCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessIntCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessLongCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessBoolCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> 
command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessDoubleCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessBytesCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessTypeCallback = x => onSuccessCallback(JsonSerializer.DeserializeFromString(x)), + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessMultiStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToSet()), + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(async r => + { + var result = await command(r).ConfigureAwait(false); + return result.ToList(); + })); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessMultiTypeCallback = x => onSuccessCallback(x.ConvertAll(JsonSerializer.DeserializeFromString)), + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + async ValueTask IRedisPipelineSharedAsync.ReplayAsync(CancellationToken token) + { + RedisClient.Pipeline = this; + // execute + foreach (var queuedCommand in QueuedCommands) + { + if (queuedCommand is QueuedRedisTypedCommand cmd) + await cmd.ExecuteAsync(RedisClient).ConfigureAwait(false); + } + await AsAsync().FlushAsync(token).ConfigureAwait(false); + return true; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedPipeline.cs b/src/ServiceStack.Redis/Generic/RedisTypedPipeline.cs index 52ec2393..86e42af1 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedPipeline.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedPipeline.cs @@ -3,31 +3,31 @@ namespace ServiceStack.Redis { - /// - /// Pipeline for redis typed 
client - /// - /// - public class RedisTypedPipeline : RedisTypedCommandQueue, IRedisTypedPipeline - { - internal RedisTypedPipeline(RedisTypedClient redisClient) - : base(redisClient) - { - Init(); - } + /// + /// Pipeline for redis typed client + /// + /// + public partial class RedisTypedPipeline : RedisTypedCommandQueue, IRedisTypedPipeline + { + internal RedisTypedPipeline(RedisTypedClient redisClient) + : base(redisClient) + { + Init(); + } protected virtual void Init() { - if (RedisClient.Transaction != null) + if (RedisClient.Transaction != null) throw new InvalidOperationException("A transaction is already in use"); - if (RedisClient.Pipeline != null) - throw new InvalidOperationException("A pipeline is already in use"); - - RedisClient.Pipeline = this; + if (RedisClient.Pipeline != null) + throw new InvalidOperationException("A pipeline is already in use"); + RedisClient.Pipeline = this; } - public void Flush() - { + + public void Flush() + { try { @@ -47,34 +47,33 @@ public void Flush() ClosePipeline(); RedisClient.AddTypeIdsRegisteredDuringPipeline(); } - } + } + protected void Execute() { foreach (var queuedCommand in QueuedCommands) { - var cmd = queuedCommand as QueuedRedisTypedCommand; - if (cmd != null) + if (queuedCommand is QueuedRedisTypedCommand cmd) cmd.Execute(RedisClient); } } - public bool Replay() - { - RedisClient.Pipeline = this; - Execute(); + public virtual bool Replay() + { + RedisClient.Pipeline = this; + Execute(); Flush(); - return true; - } + return true; + } - protected void ClosePipeline() - { - RedisClient.ResetSendBuffer(); - RedisClient.Pipeline = null; - } + protected void ClosePipeline() + { + RedisClient.EndPipeline(); + } - public void Dispose() - { - ClosePipeline(); - } - } + public virtual void Dispose() + { + ClosePipeline(); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedTransaction.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedTransaction.Async.cs new file mode 100644 index 00000000..33375094 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedTransaction.Async.cs @@ -0,0 +1,94 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Redis.Pipeline; + +namespace ServiceStack.Redis.Generic +{ + /// + /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). 
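
For orientation, a rough usage sketch of the async typed-transaction surface this new file implements. Only the transaction internals appear in the diff, so the entry points shown (`GetClientAsync`, `As<T>()`, `CreateTransactionAsync`) are assumptions drawn from the corresponding sync API rather than verified signatures.

```csharp
using System.Threading.Tasks;
using ServiceStack.Redis;

public class Counter
{
    public long Id { get; set; }
    public int Value { get; set; }
}

public static class TypedTransactionAsyncSketch
{
    public static async Task RunAsync(IRedisClientsManagerAsync manager)
    {
        await using var redis = await manager.GetClientAsync();
        var typed = redis.As<Counter>();                        // assumed typed async client
        await using var trans = await typed.CreateTransactionAsync();

        // Commands are only queued here; CommitAsync() sends MULTI, the queued
        // commands, then EXEC, reading back one QUEUED reply per command.
        trans.QueueCommand(r => r.StoreAsync(new Counter { Id = 1, Value = 1 }));

        bool committed = await trans.CommitAsync();
    }
}
```
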
+ /// + internal partial class RedisTypedTransaction + : IRedisTypedTransactionAsync, IRedisTransactionBaseAsync + { + async ValueTask IRedisTypedTransactionAsync.CommitAsync(CancellationToken token) + { + bool rc = true; + try + { + _numCommands = QueuedCommands.Count / 2; + + //insert multi command at beginning + QueuedCommands.Insert(0, new QueuedRedisCommand() + { + }.WithAsyncReturnCommand(VoidReturnCommandAsync: r => { Init(); return default; }) + .WithAsyncReadCommand(RedisClient.ExpectOkAsync)); + + //the first half of the responses will be "QUEUED", + // so insert reading of multiline after these responses + QueuedCommands.Insert(_numCommands + 1, new QueuedRedisOperation() + { + OnSuccessIntCallback = handleMultiDataResultCount + }.WithAsyncReadCommand(RedisClient.ReadMultiDataResultCountAsync)); + + // add Exec command at end (not queued) + QueuedCommands.Add(new RedisCommand() + { + }.WithAsyncReturnCommand(r => ExecAsync(token))); + + //execute transaction + await ExecAsync(token).ConfigureAwait(false); + + ///////////////////////////// + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + } + catch (RedisTransactionFailedException) + { + rc = false; + } + finally + { + RedisClient.Transaction = null; + ClosePipeline(); + await RedisClient.AddTypeIdsRegisteredDuringPipelineAsync(token).ConfigureAwait(false); + } + return rc; + } + + private ValueTask ExecAsync(CancellationToken token) + { + RedisClient.Exec(); + return RedisClient.FlushSendBufferAsync(token); + } + + ValueTask IRedisTypedTransactionAsync.RollbackAsync(CancellationToken token) + { + Rollback(); // no async bits needed + return default; + } + + partial void QueueExpectQueuedAsync() + { + QueuedCommands.Insert(0, new QueuedRedisOperation + { + }.WithAsyncReadCommand(RedisClient.ExpectQueuedAsync)); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedTransaction.cs b/src/ServiceStack.Redis/Generic/RedisTypedTransaction.cs index a62c205a..989ef1f4 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedTransaction.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedTransaction.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -16,18 +16,21 @@ namespace ServiceStack.Redis.Generic { - /// - /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). - /// - internal class RedisTypedTransaction - : RedisTypedPipeline, IRedisTypedTransaction, IRedisTransactionBase - { - private int _numCommands = 0; - internal RedisTypedTransaction(RedisTypedClient redisClient) - : base(redisClient) - { - - } + /// + /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). 
+ /// + internal partial class RedisTypedTransaction + : RedisTypedPipeline, IRedisTypedTransaction, IRedisTransactionBase + { + private int _numCommands = 0; + private readonly bool _isAsync; + internal RedisTypedTransaction(RedisTypedClient redisClient, bool isAsync) + : base(redisClient) + { + // if someone casts between sync/async: the sync-over-async or + // async-over-sync is entirely self-inflicted; I can't fix stupid + _isAsync = isAsync; + } protected override void Init() { @@ -59,7 +62,6 @@ private void Exec() { RedisClient.Exec(); RedisClient.FlushSendBuffer(); - } public bool Commit() @@ -76,7 +78,6 @@ public bool Commit() VoidReadCommand = RedisClient.ExpectOk, }); - //the first half of the responses will be "QUEUED", // so insert reading of multiline after these responses QueuedCommands.Insert(_numCommands + 1, new QueuedRedisOperation() @@ -101,7 +102,7 @@ public bool Commit() queuedCommand.ProcessResult(); } } - catch (RedisTransactionFailedException e) + catch (RedisTransactionFailedException) { rc = false; } @@ -120,10 +121,9 @@ public bool Commit() private void handleMultiDataResultCount(int count) { if (count != _numCommands) - throw new InvalidOperationException(string.Format( - "Invalid results received from 'EXEC', expected '{0}' received '{1}'" - + "\nWarning: Transaction was committed", - _numCommands, count)); + throw new InvalidOperationException( + $"Invalid results received from 'EXEC', expected '{_numCommands}' received '{count}'" + + "\nWarning: Transaction was committed"); } public void Rollback() @@ -135,7 +135,7 @@ public void Rollback() RedisClient.ClearTypeIdsRegisteredDuringPipeline(); } - public bool Replay() + public override bool Replay() { bool rc = true; try @@ -163,7 +163,7 @@ public bool Replay() return rc; } - public void Dispose() + public override void Dispose() { base.Dispose(); if (RedisClient.Transaction == null) return; @@ -175,8 +175,16 @@ public void Dispose() protected override void AddCurrentQueuedOperation() { base.AddCurrentQueuedOperation(); - QueueExpectQueued(); + if (_isAsync) + { + QueueExpectQueuedAsync(); + } + else + { + QueueExpectQueued(); + } } #endregion + partial void QueueExpectQueuedAsync(); } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/IHandleClientDispose.cs b/src/ServiceStack.Redis/IHandleClientDispose.cs new file mode 100644 index 00000000..a05ddbcb --- /dev/null +++ b/src/ServiceStack.Redis/IHandleClientDispose.cs @@ -0,0 +1,7 @@ +namespace ServiceStack.Redis +{ + public interface IHandleClientDispose + { + void DisposeClient(RedisNativeClient client); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/IRedisClientFactory.cs b/src/ServiceStack.Redis/IRedisClientFactory.cs deleted file mode 100644 index fe0e1b90..00000000 --- a/src/ServiceStack.Redis/IRedisClientFactory.cs +++ /dev/null @@ -1,21 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
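
A minimal sync counterpart, showing what the Commit() protocol above amounts to from the caller's side. The `redis.As<T>().CreateTransaction()` entry point is the conventional one but is not part of this diff, so treat it as an assumption.

```csharp
using ServiceStack.Redis;

public class Order
{
    public long Id { get; set; }
    public decimal Total { get; set; }
}

public static class TypedTransactionSyncSketch
{
    public static void Run(IRedisClientsManager manager)
    {
        using var redis = manager.GetClient();
        var typed = redis.As<Order>();

        using var trans = typed.CreateTransaction();
        trans.QueueCommand(r => r.Store(new Order { Id = 1, Total = 9.99m }));

        // Commit() expects: +OK for MULTI, one +QUEUED per command, then the
        // EXEC multi-bulk whose element count is validated against the number
        // of queued commands (handleMultiDataResultCount).
        bool committed = trans.Commit();
    }
}
```
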
-// - -using System.Net; - -namespace ServiceStack.Redis -{ - public interface IRedisClientFactory - { - RedisClient CreateRedisClient(string host, int port); - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/IRedisFailover.cs b/src/ServiceStack.Redis/IRedisFailover.cs new file mode 100644 index 00000000..4424acdd --- /dev/null +++ b/src/ServiceStack.Redis/IRedisFailover.cs @@ -0,0 +1,14 @@ +using System; +using System.Collections.Generic; + +namespace ServiceStack.Redis +{ + public interface IRedisFailover + { + List> OnFailover { get; } + + void FailoverTo(params string[] readWriteHosts); + + void FailoverTo(IEnumerable readWriteHosts, IEnumerable readOnlyHosts); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/IRedisResolver.cs b/src/ServiceStack.Redis/IRedisResolver.cs new file mode 100644 index 00000000..95b663df --- /dev/null +++ b/src/ServiceStack.Redis/IRedisResolver.cs @@ -0,0 +1,53 @@ +using System; +using System.Collections.Generic; + +namespace ServiceStack.Redis +{ + /// + /// Resolver strategy for resolving hosts and creating clients + /// + public interface IRedisResolver + { + Func ClientFactory { get; set; } + + int ReadWriteHostsCount { get; } + int ReadOnlyHostsCount { get; } + + void ResetMasters(IEnumerable hosts); + void ResetSlaves(IEnumerable hosts); + + RedisClient CreateMasterClient(int desiredIndex); + RedisClient CreateSlaveClient(int desiredIndex); + } + + public interface IRedisResolverExtended : IRedisResolver + { + RedisClient CreateRedisClient(RedisEndpoint config, bool master); + + RedisEndpoint GetReadWriteHost(int desiredIndex); + RedisEndpoint GetReadOnlyHost(int desiredIndex); + } + + public interface IHasRedisResolver + { + IRedisResolver RedisResolver { get; set; } + } + + public static class RedisResolverExtensions + { + public static RedisClient CreateRedisClient(this IRedisResolver resolver, RedisEndpoint config, bool master) + { + return ((IRedisResolverExtended)resolver).CreateRedisClient(config, master); + } + + public static RedisEndpoint GetReadWriteHost(this IRedisResolver resolver, int desiredIndex) + { + return ((IRedisResolverExtended)resolver).GetReadWriteHost(desiredIndex); + } + + public static RedisEndpoint GetReadOnlyHost(this IRedisResolver resolver, int desiredIndex) + { + return ((IRedisResolverExtended)resolver).GetReadOnlyHost(desiredIndex); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/IRedisSentinel.cs b/src/ServiceStack.Redis/IRedisSentinel.cs new file mode 100644 index 00000000..124b7357 --- /dev/null +++ b/src/ServiceStack.Redis/IRedisSentinel.cs @@ -0,0 +1,8 @@ +using System; +namespace ServiceStack.Redis +{ + public interface IRedisSentinel : IDisposable + { + IRedisClientsManager Start(); + } +} diff --git a/src/ServiceStack.Redis/Messaging/MessageHandlerWorker.cs b/src/ServiceStack.Redis/Messaging/MessageHandlerWorker.cs deleted file mode 100644 index bf1365e5..00000000 --- a/src/ServiceStack.Redis/Messaging/MessageHandlerWorker.cs +++ /dev/null @@ -1,235 +0,0 @@ -using System; -using System.Threading; -using ServiceStack.Logging; -using ServiceStack.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Messaging -{ - internal class MessageHandlerWorker : IDisposable - { - private static readonly ILog Log = LogManager.GetLogger(typeof(MessageHandlerWorker)); - - readonly object msgLock = new object(); - - private readonly IMessageHandler messageHandler; - private readonly IRedisClientsManager clientsManager; - - public string 
QueueName { get; set; } - - private int status; - public int Status - { - get { return status; } - } - - private Thread bgThread; - private int timesStarted = 0; - private bool receivedNewMsgs = false; - public Action errorHandler { get; set; } - - private DateTime lastMsgProcessed; - public DateTime LastMsgProcessed - { - get { return lastMsgProcessed; } - } - - private int totalMessagesProcessed; - public int TotalMessagesProcessed - { - get { return totalMessagesProcessed; } - } - - private int msgNotificationsReceived; - public int MsgNotificationsReceived - { - get { return msgNotificationsReceived; } - } - - public MessageHandlerWorker( - IRedisClientsManager clientsManager, IMessageHandler messageHandler, string queueName, - Action errorHandler) - { - this.clientsManager = clientsManager; - this.messageHandler = messageHandler; - this.QueueName = queueName; - this.errorHandler = errorHandler; - } - - public MessageHandlerWorker Clone() - { - return new MessageHandlerWorker(clientsManager, messageHandler, QueueName, errorHandler); - } - - public void NotifyNewMessage() - { - Interlocked.Increment(ref msgNotificationsReceived); - - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Started) - { - if (Monitor.TryEnter(msgLock)) - { - Monitor.Pulse(msgLock); - Monitor.Exit(msgLock); - } - else - { - receivedNewMsgs = true; - } - } - } - - public void Start() - { - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Started) - return; - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Disposed) - throw new ObjectDisposedException("MQ Host has been disposed"); - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Stopping) - KillBgThreadIfExists(); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Starting, WorkerStatus.Stopped) == WorkerStatus.Stopped) - { - Log.Debug("Starting MQ Handler Worker: {0}...".Fmt(QueueName)); - - //Should only be 1 thread past this point - bgThread = new Thread(Run) { - Name = "{0}: {1}".Fmt(GetType().Name, QueueName), - IsBackground = true, - }; - bgThread.Start(); - } - } - - public void ForceRestart() - { - KillBgThreadIfExists(); - Start(); - } - - private void Run() - { - if (Interlocked.CompareExchange(ref status, WorkerStatus.Started, WorkerStatus.Starting) != WorkerStatus.Starting) return; - timesStarted++; - - try - { - lock (msgLock) - { - while (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Started) - { - receivedNewMsgs = false; - - using (var mqClient = new RedisMessageQueueClient(clientsManager)) - { - var msgsProcessedThisTime = messageHandler.ProcessQueue(mqClient, QueueName, - () => Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Started); - - totalMessagesProcessed += msgsProcessedThisTime; - - if (msgsProcessedThisTime > 0) - lastMsgProcessed = DateTime.UtcNow; - } - - if (!receivedNewMsgs) - Monitor.Wait(msgLock); - } - } - } - catch (Exception ex) - { - //Ignore handling rare, but expected exceptions from KillBgThreadIfExists() - if (ex is ThreadInterruptedException || ex is ThreadAbortException) - { - Log.Warn("Received {0} in Worker: {1}".Fmt(ex.GetType().Name, QueueName)); - return; - } - - Stop(); - if (this.errorHandler != null) this.errorHandler(this, ex); - } - finally - { - //If it's in an invalid state, Dispose() this worker. 
- if (Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Stopping) != WorkerStatus.Stopping) - { - Dispose(); - } - } - } - - public void Stop() - { - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Disposed) - return; - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Stopping, WorkerStatus.Started) == WorkerStatus.Started) - { - Log.Debug("Stopping MQ Handler Worker: {0}...".Fmt(QueueName)); - Thread.Sleep(100); - lock (msgLock) - { - Monitor.Pulse(msgLock); - } - } - } - - private void KillBgThreadIfExists() - { - try - { - if (bgThread != null && bgThread.IsAlive) - { - //give it a small chance to die gracefully - if (!bgThread.Join(500)) - { - //Ideally we shouldn't get here, but lets try our hardest to clean it up - Log.Warn("Interrupting previous Background Worker: " + bgThread.Name); - bgThread.Interrupt(); - if (!bgThread.Join(TimeSpan.FromSeconds(3))) - { - Log.Warn(bgThread.Name + " just wont die, so we're now aborting it..."); - bgThread.Abort(); - } - } - } - } - finally - { - bgThread = null; - status = WorkerStatus.Stopped; - } - } - - public virtual void Dispose() - { - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Disposed) - return; - - Stop(); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Disposed, WorkerStatus.Stopped) != WorkerStatus.Stopped) - Interlocked.CompareExchange(ref status, WorkerStatus.Disposed, WorkerStatus.Stopping); - - try - { - KillBgThreadIfExists(); - } - catch (Exception ex) - { - Log.Error("Error Disposing MessageHandlerWorker for: " + QueueName, ex); - } - } - - public IMessageHandlerStats GetStats() - { - return messageHandler.GetStats(); - } - - public string GetStatus() - { - return "[Worker: {0}, Status: {1}, ThreadStatus: {2}, LastMsgAt: {3}]" - .Fmt(QueueName, WorkerStatus.ToString(status), bgThread.ThreadState, LastMsgProcessed); - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Messaging/RedisMessageFactory.cs b/src/ServiceStack.Redis/Messaging/RedisMessageFactory.cs deleted file mode 100644 index c7d96bbf..00000000 --- a/src/ServiceStack.Redis/Messaging/RedisMessageFactory.cs +++ /dev/null @@ -1,28 +0,0 @@ -using ServiceStack.Messaging; - -namespace ServiceStack.Redis.Messaging -{ - public class RedisMessageFactory : IMessageFactory - { - private readonly IRedisClientsManager clientsManager; - - public RedisMessageFactory(IRedisClientsManager clientsManager) - { - this.clientsManager = clientsManager; - } - - public IMessageQueueClient CreateMessageQueueClient() - { - return new RedisMessageQueueClient(clientsManager); - } - - public IMessageProducer CreateMessageProducer() - { - return new RedisMessageProducer(clientsManager); - } - - public void Dispose() - { - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Messaging/RedisMessageProducer.cs b/src/ServiceStack.Redis/Messaging/RedisMessageProducer.cs deleted file mode 100644 index 52285a36..00000000 --- a/src/ServiceStack.Redis/Messaging/RedisMessageProducer.cs +++ /dev/null @@ -1,75 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
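
The removed MessageHandlerWorker coordinates its background thread with a classic Monitor pulse/wait handshake: NotifyNewMessage() pulses the lock if the worker is idle, or flips a flag if the worker is mid-run so it loops again before sleeping. A condensed, self-contained sketch of that pattern (names simplified; this is not the original class):

```csharp
using System.Threading;

public class PulseWorker
{
    private readonly object msgLock = new object();
    private bool receivedNewMsgs;          // same racy flag the original used
    private volatile bool running = true;

    public void Notify()
    {
        if (Monitor.TryEnter(msgLock))
        {
            Monitor.Pulse(msgLock);        // worker is waiting: wake it up
            Monitor.Exit(msgLock);
        }
        else
        {
            receivedNewMsgs = true;        // worker is busy: flag work for its next pass
        }
    }

    public void Run()
    {
        lock (msgLock)
        {
            while (running)
            {
                receivedNewMsgs = false;
                ProcessQueue();            // drain all pending messages

                if (!receivedNewMsgs)
                    Monitor.Wait(msgLock); // releases the lock and sleeps until Notify()
            }
        }
    }

    public void Stop()
    {
        running = false;
        lock (msgLock) Monitor.Pulse(msgLock);  // unblock a waiting worker
    }

    private void ProcessQueue() { /* dequeue and handle messages */ }
}
```
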
-// - -using System; -using ServiceStack.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Messaging -{ - public class RedisMessageProducer - : IMessageProducer - { - private readonly IRedisClientsManager clientsManager; - private readonly Action onPublishedCallback; - - public RedisMessageProducer(IRedisClientsManager clientsManager) - : this(clientsManager, null) {} - - public RedisMessageProducer(IRedisClientsManager clientsManager, Action onPublishedCallback) - { - this.clientsManager = clientsManager; - this.onPublishedCallback = onPublishedCallback; - } - - private IRedisNativeClient readWriteClient; - public IRedisNativeClient ReadWriteClient - { - get - { - if (this.readWriteClient == null) - { - this.readWriteClient = (IRedisNativeClient)clientsManager.GetClient(); - } - return readWriteClient; - } - } - - public void Publish(T messageBody) - { - if (typeof(IMessage).IsAssignableFrom(typeof(T))) - Publish((IMessage)messageBody); - else - Publish((IMessage)new Message(messageBody)); - } - - public void Publish(IMessage message) - { - var messageBytes = message.ToBytes(); - this.ReadWriteClient.LPush(message.ToInQueueName(), messageBytes); - this.ReadWriteClient.Publish(QueueNames.TopicIn, message.ToInQueueName().ToUtf8Bytes()); - - if (onPublishedCallback != null) - { - onPublishedCallback(); - } - } - - public void Dispose() - { - if (readWriteClient != null) - { - readWriteClient.Dispose(); - } - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Messaging/RedisMessageQueueClient.cs b/src/ServiceStack.Redis/Messaging/RedisMessageQueueClient.cs deleted file mode 100644 index 4fff6af3..00000000 --- a/src/ServiceStack.Redis/Messaging/RedisMessageQueueClient.cs +++ /dev/null @@ -1,139 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
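
Stripped of the factory plumbing, the removed RedisMessageProducer publishes in two steps: LPUSH the serialized message onto the type's IN queue, then PUBLISH the queue name on the shared topic channel so a listening MQ host knows to poll it. A sketch against the native client; the topic name here is an illustrative stand-in for QueueNames.TopicIn.

```csharp
using ServiceStack.Redis;
using ServiceStack.Text;

public static class PublishSketch
{
    public static void Publish(IRedisNativeClient redis, string inQueueName, byte[] messageBytes)
    {
        redis.LPush(inQueueName, messageBytes);                   // enqueue the message
        redis.Publish("mq:topic:in", inQueueName.ToUtf8Bytes());  // notify subscribed MQ hosts
    }
}
```
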
-// - -using System; -using ServiceStack.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Messaging -{ - public class RedisMessageQueueClient - : IMessageQueueClient - { - private readonly Action onPublishedCallback; - private readonly IRedisClientsManager clientsManager; - - public int MaxSuccessQueueSize { get; set; } - - public RedisMessageQueueClient(IRedisClientsManager clientsManager) - : this(clientsManager, null) {} - - public RedisMessageQueueClient( - IRedisClientsManager clientsManager, Action onPublishedCallback) - { - this.onPublishedCallback = onPublishedCallback; - this.clientsManager = clientsManager; - this.MaxSuccessQueueSize = 100; - } - - private IRedisNativeClient readWriteClient; - public IRedisNativeClient ReadWriteClient - { - get - { - if (this.readWriteClient == null) - { - this.readWriteClient = (IRedisNativeClient)clientsManager.GetClient(); - } - return readWriteClient; - } - } - - private IRedisNativeClient readOnlyClient; - public IRedisNativeClient ReadOnlyClient - { - get - { - if (this.readOnlyClient == null) - { - this.readOnlyClient = (IRedisNativeClient)clientsManager.GetReadOnlyClient(); - } - return readOnlyClient; - } - } - - public void Publish(T messageBody) - { - if (typeof(IMessage).IsAssignableFrom(typeof(T))) - Publish((IMessage)messageBody); - else - Publish(new Message(messageBody)); - } - - public void Publish(IMessage message) - { - var messageBytes = message.ToBytes(); - Publish(message.ToInQueueName(), messageBytes); - } - - public void Publish(IMessage message) - { - var messageBytes = message.ToBytes(); - Publish(message.ToInQueueName(), messageBytes); - } - - public void Publish(string queueName, byte[] messageBytes) - { - this.ReadWriteClient.LPush(queueName, messageBytes); - this.ReadWriteClient.Publish(QueueNames.TopicIn, queueName.ToUtf8Bytes()); - - if (onPublishedCallback != null) - { - onPublishedCallback(); - } - } - - public void Notify(string queueName, byte[] messageBytes) - { - this.ReadWriteClient.LPush(queueName, messageBytes); - this.ReadWriteClient.LTrim(queueName, 0, this.MaxSuccessQueueSize); - this.ReadWriteClient.Publish(QueueNames.TopicOut, queueName.ToUtf8Bytes()); - } - - public byte[] Get(string queueName, TimeSpan? timeOut) - { - var unblockingKeyAndValue = this.ReadOnlyClient.BRPop(queueName, (int) timeOut.GetValueOrDefault().TotalSeconds); - return unblockingKeyAndValue.Length != 2 - ? 
null - : unblockingKeyAndValue[1]; - } - - public byte[] GetAsync(string queueName) - { - return this.ReadOnlyClient.RPop(queueName); - } - - public string WaitForNotifyOnAny(params string[] channelNames) - { - string result = null; - var subscription = new RedisSubscription(readOnlyClient); - subscription.OnMessage = (channel, msg) => { - result = msg; - subscription.UnSubscribeFromAllChannels(); - }; - subscription.SubscribeToChannels(channelNames); //blocks - return result; - } - - public void Dispose() - { - if (this.readOnlyClient != null) - { - this.readOnlyClient.Dispose(); - } - if (this.readWriteClient != null) - { - this.readWriteClient.Dispose(); - } - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Messaging/RedisMessageQueueClientFactory.cs b/src/ServiceStack.Redis/Messaging/RedisMessageQueueClientFactory.cs deleted file mode 100644 index 138e866b..00000000 --- a/src/ServiceStack.Redis/Messaging/RedisMessageQueueClientFactory.cs +++ /dev/null @@ -1,41 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using ServiceStack.Messaging; - -namespace ServiceStack.Redis.Messaging -{ - public class RedisMessageQueueClientFactory - : IMessageQueueClientFactory - { - private readonly Action onPublishedCallback; - private readonly IRedisClientsManager clientsManager; - - public RedisMessageQueueClientFactory( - IRedisClientsManager clientsManager, Action onPublishedCallback) - { - this.onPublishedCallback = onPublishedCallback; - this.clientsManager = clientsManager; - } - - public IMessageQueueClient CreateMessageQueueClient() - { - return new RedisMessageQueueClient( - this.clientsManager, this.onPublishedCallback); - } - - public void Dispose() - { - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Messaging/RedisMqHost.cs b/src/ServiceStack.Redis/Messaging/RedisMqHost.cs deleted file mode 100644 index a14a0aa5..00000000 --- a/src/ServiceStack.Redis/Messaging/RedisMqHost.cs +++ /dev/null @@ -1,350 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading; -using ServiceStack.Logging; -using ServiceStack.Messaging; -using ServiceStack.Service; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Messaging -{ - /// - /// Creates an MQ Host that processes all messages on a single background thread. - /// i.e. If you register 3 handlers it will only create 1 background thread. - /// - /// The same background thread that listens to the Redis MQ Subscription for new messages - /// also cycles through each registered handler processing all pending messages one-at-a-time: - /// first in the message PriorityQ, then in the normal message InQ. - /// - /// The Start/Stop methods are idempotent i.e. It's safe to call them repeatedly on multiple threads - /// and the Redis MQ Host will only have Started/Stopped once. 
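
The corresponding consume side, extracted from the removed RedisMessageQueueClient.Get(): a blocking BRPOP that returns a [key, value] pair, or null on timeout.

```csharp
using System;
using ServiceStack.Redis;

public static class DequeueSketch
{
    public static byte[] Get(IRedisNativeClient redis, string queueName, TimeSpan? timeOut)
    {
        var keyAndValue = redis.BRPop(queueName, (int)timeOut.GetValueOrDefault().TotalSeconds);
        return keyAndValue.Length != 2
            ? null             // timed out with nothing to dequeue
            : keyAndValue[1];  // the message payload
    }
}
```
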
- /// - public class RedisMqHost : IMessageService - { - private static readonly ILog Log = LogManager.GetLogger(typeof(RedisMqHost)); - public const int DefaultRetryCount = 2; //Will be a total of 3 attempts - - public IMessageFactory MessageFactory { get; private set; } - - readonly Random rand = new Random(Environment.TickCount); - private void SleepBackOffMultiplier(int continuousErrorsCount) - { - if (continuousErrorsCount == 0) return; - const int MaxSleepMs = 60 * 1000; - - //exponential/random retry back-off. - var nextTry = Math.Min( - rand.Next((int)Math.Pow(continuousErrorsCount, 3), (int)Math.Pow(continuousErrorsCount + 1, 3) + 1), - MaxSleepMs); - - Log.Debug("Sleeping for {0}ms after {1} continuous errors".Fmt(nextTry, continuousErrorsCount)); - - Thread.Sleep(nextTry); - } - - //Stats - private long timesStarted = 0; - private long noOfErrors = 0; - private int noOfContinuousErrors = 0; - private string lastExMsg = null; - private int status; - - private long bgThreadCount = 0; - public long BgThreadCount - { - get { return Interlocked.CompareExchange(ref bgThreadCount, 0, 0); } - } - - public int RetryCount { get; protected set; } - public TimeSpan? RequestTimeOut { get; protected set; } - - /// - /// Inject your own Reply Client Factory to handle custom Message.ReplyTo urls. - /// - public Func ReplyClientFactory { get; set; } - - public Func RequestFilter { get; set; } - public Func ResponseFilter { get; set; } - - public Action ErrorHandler { get; set; } - - private readonly IRedisClientsManager clientsManager; //Thread safe redis client/conn factory - - public IMessageQueueClient CreateMessageQueueClient() - { - return new RedisMessageQueueClient(this.clientsManager); - } - - public RedisMqHost(IRedisClientsManager clientsManager, - int retryCount = DefaultRetryCount, TimeSpan? 
requestTimeOut = null) - { - this.clientsManager = clientsManager; - this.RetryCount = retryCount; - this.RequestTimeOut = requestTimeOut; - this.MessageFactory = new RedisMessageFactory(clientsManager); - this.ErrorHandler = ex => Log.Error("Exception in Background Thread: " + ex.Message, ex); - } - - private readonly Dictionary handlerMap - = new Dictionary(); - - private IMessageHandler[] messageHandlers; - private string[] inQueueNames; - - public string Title - { - get { return string.Join(", ", inQueueNames); } - } - - public void RegisterHandler(Func, object> processMessageFn) - { - RegisterHandler(processMessageFn, null); - } - - public void RegisterHandler(Func, object> processMessageFn, Action, Exception> processExceptionEx) - { - if (handlerMap.ContainsKey(typeof(T))) - { - throw new ArgumentException("Message handler has already been registered for type: " + typeof(T).Name); - } - - handlerMap[typeof(T)] = CreateMessageHandlerFactory(processMessageFn, processExceptionEx); - } - - protected IMessageHandlerFactory CreateMessageHandlerFactory(Func, object> processMessageFn, Action, Exception> processExceptionEx) - { - return new MessageHandlerFactory(this, processMessageFn, processExceptionEx) { - RequestFilter = this.RequestFilter, - ResponseFilter = this.ResponseFilter, - RetryCount = RetryCount, - }; - } - - private void RunLoop() - { - if (Interlocked.CompareExchange(ref status, WorkerStatus.Started, WorkerStatus.Starting) != WorkerStatus.Starting) return; - Interlocked.Increment(ref timesStarted); - - try - { - while (true) - { - //Pass in a new MQ Client that may be used by message handlers - using (var mqClient = CreateMessageQueueClient()) - { - foreach (var handler in messageHandlers) - { - if (Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Stopping) == WorkerStatus.Stopping) - { - Log.Debug("MQ Host is stopping, exiting RunLoop()..."); - return; - } - if (Interlocked.CompareExchange(ref status, 0, 0) != WorkerStatus.Started) - { - Log.Error("MQ Host is in an invalid state '{0}', exiting RunLoop()...".Fmt(GetStatus())); - return; - } - handler.Process(mqClient); - } - - //Record that we had a good run... 
- Interlocked.CompareExchange(ref noOfContinuousErrors, 0, noOfContinuousErrors); - - var cmd = mqClient.WaitForNotifyOnAny(QueueNames.TopicIn); - if (cmd == WorkerStatus.StopCommand) - { - Log.Debug("Stop Command Issued"); - if (Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Started) != WorkerStatus.Started) - Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Stopping); - - return; - } - } - } - } - catch (Exception ex) - { - lastExMsg = ex.Message; - Interlocked.Increment(ref noOfErrors); - Interlocked.Increment(ref noOfContinuousErrors); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Started) != WorkerStatus.Started) - Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Stopping); - - if (this.ErrorHandler != null) this.ErrorHandler(ex); - } - } - - private Thread bgThread; - - public virtual void Start() - { - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Started) return; - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Disposed) - throw new ObjectDisposedException("MQ Host has been disposed"); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Starting, WorkerStatus.Stopped) == WorkerStatus.Stopped) //Should only be 1 thread past this point - { - try - { - Init(); - - if (this.messageHandlers == null || this.messageHandlers.Length == 0) - { - Log.Warn("Cannot start a MQ Host with no Message Handlers registered, ignoring."); - Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Starting); - return; - } - - SleepBackOffMultiplier(Interlocked.CompareExchange(ref noOfContinuousErrors, 0, 0)); - - KillBgThreadIfExists(); - - bgThread = new Thread(RunLoop) { - IsBackground = true, - Name = "Redis MQ Host " + Interlocked.Increment(ref bgThreadCount) - }; - bgThread.Start(); - Log.Debug("Started Background Thread: " + bgThread.Name); - } - catch (Exception ex) - { - if (this.ErrorHandler != null) this.ErrorHandler(ex); - } - } - } - - private void KillBgThreadIfExists() - { - if (bgThread != null && bgThread.IsAlive) - { - //give it a small chance to die gracefully - if (!bgThread.Join(500)) - { - //Ideally we shouldn't get here, but lets try our hardest to clean it up - Log.Warn("Interrupting previous Background Thread: " + bgThread.Name); - bgThread.Interrupt(); - if (!bgThread.Join(TimeSpan.FromSeconds(3))) - { - Log.Warn(bgThread.Name + " just wont die, so we're now aborting it..."); - bgThread.Abort(); - } - } - bgThread = null; - } - } - - private void Init() - { - if (this.messageHandlers == null) - { - this.messageHandlers = this.handlerMap.Values.ToList() - .ConvertAll(x => x.CreateMessageHandler()).ToArray(); - } - if (inQueueNames == null) - { - inQueueNames = this.handlerMap.Keys.ToList() - .ConvertAll(x => new QueueNames(x).In).ToArray(); - } - } - - public string GetStatus() - { - switch (Interlocked.CompareExchange(ref status, 0, 0)) - { - case WorkerStatus.Disposed: - return "Disposed"; - case WorkerStatus.Stopped: - return "Stopped"; - case WorkerStatus.Stopping: - return "Stopping"; - case WorkerStatus.Starting: - return "Starting"; - case WorkerStatus.Started: - return "Started"; - } - return null; - } - - public IMessageHandlerStats GetStats() - { - lock (messageHandlers) - { - var total = new MessageHandlerStats("All Handlers"); - messageHandlers.ToList().ForEach(x => total.Add(x.GetStats())); - return total; - } - } - - public string GetStatsDescription() - { - lock (messageHandlers) - { - 
var sb = new StringBuilder("#MQ HOST STATS:\n"); - sb.AppendLine("==============="); - sb.AppendLine("For: " + this.Title); - sb.AppendLine("Current Status: " + GetStatus()); - sb.AppendLine("Listening On: " + string.Join(", ", inQueueNames)); - sb.AppendLine("Times Started: " + Interlocked.CompareExchange(ref timesStarted, 0, 0)); - sb.AppendLine("Num of Errors: " + Interlocked.CompareExchange(ref noOfErrors, 0, 0)); - sb.AppendLine("Num of Continuous Errors: " + Interlocked.CompareExchange(ref noOfContinuousErrors, 0, 0)); - sb.AppendLine("Last ErrorMsg: " + lastExMsg); - sb.AppendLine("==============="); - foreach (var messageHandler in messageHandlers) - { - sb.AppendLine(messageHandler.GetStats().ToString()); - sb.AppendLine("---------------\n"); - } - return sb.ToString(); - } - } - - public virtual void Stop() - { - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Disposed) - throw new ObjectDisposedException("MQ Host has been disposed"); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Stopping, WorkerStatus.Started) == WorkerStatus.Started) - { - Log.Debug("Stopping MQ Host..."); - - //Unblock current bgthread by issuing StopCommand - try - { - using (var redis = clientsManager.GetClient()) - { - redis.PublishMessage(QueueNames.TopicIn, WorkerStatus.StopCommand); - } - } - catch (Exception ex) - { - if (this.ErrorHandler != null) this.ErrorHandler(ex); - Log.Warn("Could not send STOP message to bg thread: " + ex.Message); - } - } - } - - public virtual void Dispose() - { - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Disposed) - return; - - Stop(); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Disposed, WorkerStatus.Stopped) != WorkerStatus.Stopped) - Interlocked.CompareExchange(ref status, WorkerStatus.Disposed, WorkerStatus.Stopping); - - try - { - KillBgThreadIfExists(); - } - catch (Exception ex) - { - if (this.ErrorHandler != null) this.ErrorHandler(ex); - } - } - } - -} diff --git a/src/ServiceStack.Redis/Messaging/RedisMqHostPool.cs b/src/ServiceStack.Redis/Messaging/RedisMqHostPool.cs deleted file mode 100644 index d1d4f1e2..00000000 --- a/src/ServiceStack.Redis/Messaging/RedisMqHostPool.cs +++ /dev/null @@ -1,182 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Configuration; -using System.Linq; -using System.Text; -using ServiceStack.Common; -using ServiceStack.Logging; -using ServiceStack.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Messaging -{ - [Obsolete("Will be removed. Use RedisMqServer")] - public class RedisMqHostPool : IMessageService - { - private static readonly ILog Log = LogManager.GetLogger(typeof(RedisMqHostPool)); - - public const int DefaultNoOfThreadsPerService = 1; - public const int DefaultRetryCount = 2; //3 tries in total - - private readonly IRedisClientsManager redisManager; - - public RedisMqHostPool(IRedisClientsManager redisManager, - int? retryCount = 0, TimeSpan? 
requestTimeOut = null) - { - this.NoOfThreadsPerService = DefaultNoOfThreadsPerService; - this.RetryCount = retryCount.GetValueOrDefault(DefaultRetryCount); - this.RequestTimeOut = requestTimeOut; - this.mqHostsBuilder = new List(); - this.redisManager = redisManager; - this.MessageFactory = new RedisMessageFactory(redisManager); - - this.ErrorHandler = (mqHost, ex) => - Log.Error("Exception in Background Thread: {0} on mqHost: {1}".Fmt(ex.Message, ((RedisMqHost)mqHost).Title), ex); - } - - public int NoOfThreadsPerService { get; set; } - public int RetryCount { get; set; } - public TimeSpan? RequestTimeOut { get; protected set; } - public Action ErrorHandler { get; set; } - - public long BgThreadCount - { - get { return mqHosts.Sum(x => ((RedisMqHost) x).BgThreadCount); } - } - - protected List mqHostsBuilder; - protected IMessageService[] mqHosts; - - public virtual void RegisterHandler(Func, object> processMessageFn, int? noOfThreads = null) - { - (noOfThreads ?? this.NoOfThreadsPerService).Times(x => - { - var redisMqHost = new RedisMqHost(redisManager, this.RetryCount, this.RequestTimeOut); - redisMqHost.RegisterHandler(processMessageFn); - mqHostsBuilder.Add(redisMqHost); - }); - } - - public void RegisterHandler(Func, object> processMessageFn) - { - RegisterHandler(processMessageFn, (int?)null); - } - - public virtual void RegisterHandler(Func, object> processMessageFn, Action, Exception> processExceptionEx, int? noOfThreads = null) - { - (noOfThreads ?? this.NoOfThreadsPerService).Times(x => - { - var redisMqHost = new RedisMqHost(redisManager, this.RetryCount, this.RequestTimeOut); - redisMqHost.RegisterHandler(processMessageFn, processExceptionEx); - mqHostsBuilder.Add(redisMqHost); - }); - } - - public void RegisterHandler(Func, object> processMessageFn, Action, Exception> processExceptionEx) - { - RegisterHandler(processMessageFn, processExceptionEx, null); - } - - public IMessageQueueClient CreateMessageQueueClient() - { - return new RedisMessageQueueClient(this.redisManager, null); - } - - public virtual string GetStatus() - { - if (mqHosts == null) return null; - var statusSet = new HashSet(); - lock (mqHosts) - { - foreach (var mqHost in mqHosts) - { - statusSet.Add(((RedisMqHost)mqHost).GetStatus()); - } - } - var allStatuses = string.Join(",", statusSet.ToArray()); - return allStatuses; - } - - public virtual IMessageHandlerStats GetStats() - { - if (mqHosts == null) return null; - lock (mqHosts) - { - var total = new MessageHandlerStats("All Handlers"); - mqHosts.ToList().ForEach(x => total.Add(x.GetStats())); - return total; - } - } - - public virtual string GetStatsDescription() - { - if (mqHosts == null) return null; - lock (mqHosts) - { - var sb = new StringBuilder(); - mqHosts.ToList().ForEach(x => sb.AppendFormat(x.GetStatsDescription() + "\n\n")); - return sb.ToString(); - } - } - - public virtual void Start() - { - //First call should be started on a single thread, i.e. 
in Global.asax Application_Start() - if (mqHosts == null) - { - if (mqHostsBuilder.Count == 0) - throw new ConfigurationException("No Handler's were registered."); - - mqHosts = mqHostsBuilder.ToArray(); - } - - foreach (var mqHost in mqHosts) - { - try - { - mqHost.Start(); - } - catch (Exception ex) - { - if (this.ErrorHandler != null) this.ErrorHandler(mqHost, ex); - } - } - } - - public virtual void Stop() - { - if (mqHosts == null) return; - - foreach (var mqHost in mqHosts) - { - try - { - mqHost.Stop(); - } - catch (Exception ex) - { - if (this.ErrorHandler != null) this.ErrorHandler(mqHost, ex); - } - } - } - - public IMessageFactory MessageFactory { get; set; } - - public virtual void Dispose() - { - if (mqHosts == null) return; - - foreach (var mqHost in mqHosts) - { - try - { - mqHost.Dispose(); - } - catch (Exception ex) - { - if (this.ErrorHandler != null) this.ErrorHandler(mqHost, ex); - } - } - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Messaging/RedisMqServer.cs b/src/ServiceStack.Redis/Messaging/RedisMqServer.cs deleted file mode 100644 index 0012f548..00000000 --- a/src/ServiceStack.Redis/Messaging/RedisMqServer.cs +++ /dev/null @@ -1,516 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading; -using ServiceStack.Common; -using ServiceStack.Logging; -using ServiceStack.Messaging; -using ServiceStack.Service; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Messaging -{ - /// - /// Creates a Redis MQ Server that processes each message on its own background thread. - /// i.e. if you register 3 handlers it will create 7 background threads: - /// - 1 listening to the Redis MQ Subscription, getting notified of each new message - /// - 3x1 Normal InQ for each message handler - /// - 3x1 PriorityQ for each message handler - /// - /// When RedisMqServer Starts it creates a background thread subscribed to the Redis MQ Topic that - /// listens for new incoming messages. It also starts 2 background threads for each message type: - /// - 1 for processing the services Priority Queue and 1 processing the services normal Inbox Queue. - /// - /// Priority Queue's can be enabled on a message-per-message basis by specifying types in the - /// OnlyEnablePriortyQueuesForTypes property. The DisableAllPriorityQueues property disables all Queues. - /// - /// The Start/Stop methods are idempotent i.e. It's safe to call them repeatedly on multiple threads - /// and the Redis MQ Server will only have Started or Stopped once. - /// - public class RedisMqServer : IMessageService - { - private static readonly ILog Log = LogManager.GetLogger(typeof(RedisMqServer)); - public const int DefaultRetryCount = 2; //Will be a total of 3 attempts - - public int RetryCount { get; protected set; } - - public IMessageFactory MessageFactory { get; private set; } - - public Func ReplyClientFactory { get; set; } - - /// - /// Execute global transformation or custom logic before a request is processed. - /// Must be thread-safe. - /// - public Func RequestFilter { get; set; } - - /// - /// Execute global transformation or custom logic on the response. - /// Must be thread-safe. - /// - public Func ResponseFilter { get; set; } - - /// - /// Execute global error handler logic. Must be thread-safe. 
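
For reference, this is how the RedisMqServer being removed here was typically wired up: one handler registration per message type with an optional per-type thread count, then an idempotent Start(). `Hello` is a hypothetical message DTO and the handler body is illustrative.

```csharp
using ServiceStack.Messaging;
using ServiceStack.Redis;
using ServiceStack.Redis.Messaging;

public class Hello
{
    public string Name { get; set; }
}

public static class MqServerSketch
{
    public static void Run(IRedisClientsManager clientsManager)
    {
        var mqServer = new RedisMqServer(clientsManager, retryCount: 2);

        // Per the class doc above: 1 subscription thread, plus 1 InQ worker
        // and 1 PriorityQ worker per registered handler (per thread).
        mqServer.RegisterHandler<Hello>(m => "Hello, " + m.GetBody().Name + "!", noOfThreads: 1);

        mqServer.Start();   // safe to call repeatedly; only starts once
    }
}
```
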
- /// - public Action ErrorHandler { get; set; } - - /// - /// If you only want to enable priority queue handlers (and threads) for specific msg types - /// - public Type[] OnlyEnablePriortyQueuesForTypes { get; set; } - - /// - /// Don't listen on any Priority Queues - /// - public bool DisableAllPriorityQueues - { - set - { - OnlyEnablePriortyQueuesForTypes = new Type[0]; - } - } - - private readonly IRedisClientsManager clientsManager; //Thread safe redis client/conn factory - - public IMessageQueueClient CreateMessageQueueClient() - { - return new RedisMessageQueueClient(this.clientsManager, null); - } - - //Stats - private long timesStarted = 0; - private long noOfErrors = 0; - private int noOfContinuousErrors = 0; - private string lastExMsg = null; - private int status; - - private Thread bgThread; //Subscription controller thread - private long bgThreadCount = 0; - public long BgThreadCount - { - get { return Interlocked.CompareExchange(ref bgThreadCount, 0, 0); } - } - - private readonly Dictionary handlerMap - = new Dictionary(); - - private readonly Dictionary handlerThreadCountMap - = new Dictionary(); - - private MessageHandlerWorker[] workers; - private Dictionary queueWorkerIndexMap; - - - public RedisMqServer(IRedisClientsManager clientsManager, - int retryCount = DefaultRetryCount, TimeSpan? requestTimeOut = null) - { - this.clientsManager = clientsManager; - this.RetryCount = retryCount; - //this.RequestTimeOut = requestTimeOut; - this.MessageFactory = new RedisMessageFactory(clientsManager); - this.ErrorHandler = ex => Log.Error("Exception in Redis MQ Server: " + ex.Message, ex); - } - - public void RegisterHandler(Func, object> processMessageFn) - { - RegisterHandler(processMessageFn, null, noOfThreads:1); - } - - public void RegisterHandler(Func, object> processMessageFn, int noOfThreads) - { - RegisterHandler(processMessageFn, null, noOfThreads); - } - - public void RegisterHandler(Func, object> processMessageFn, Action, Exception> processExceptionEx) - { - RegisterHandler(processMessageFn, processExceptionEx, noOfThreads: 1); - } - - public void RegisterHandler(Func, object> processMessageFn, Action, Exception> processExceptionEx, int noOfThreads) - { - if (handlerMap.ContainsKey(typeof(T))) - { - throw new ArgumentException("Message handler has already been registered for type: " + typeof(T).Name); - } - - handlerMap[typeof(T)] = CreateMessageHandlerFactory(processMessageFn, processExceptionEx); - handlerThreadCountMap[typeof(T)] = noOfThreads; - } - - protected IMessageHandlerFactory CreateMessageHandlerFactory(Func, object> processMessageFn, Action, Exception> processExceptionEx) - { - return new MessageHandlerFactory(this, processMessageFn, processExceptionEx) { - RequestFilter = this.RequestFilter, - ResponseFilter = this.ResponseFilter, - RetryCount = RetryCount, - }; - } - - public void Init() - { - if (workers == null) - { - var workerBuilder = new List(); - - foreach (var entry in handlerMap) - { - var msgType = entry.Key; - var handlerFactory = entry.Value; - - var queueNames = new QueueNames(msgType); - var noOfThreads = handlerThreadCountMap[msgType]; - - if (OnlyEnablePriortyQueuesForTypes == null - || OnlyEnablePriortyQueuesForTypes.Any(x => x == msgType)) - { - noOfThreads.Times(i => - workerBuilder.Add(new MessageHandlerWorker( - clientsManager, - handlerFactory.CreateMessageHandler(), - queueNames.Priority, - WorkerErrorHandler))); - } - - noOfThreads.Times(i => - workerBuilder.Add(new MessageHandlerWorker( - clientsManager, - 
handlerFactory.CreateMessageHandler(), - queueNames.In, - WorkerErrorHandler))); - } - - workers = workerBuilder.ToArray(); - - queueWorkerIndexMap = new Dictionary(); - for (var i = 0; i < workers.Length; i++) - { - var worker = workers[i]; - - int[] workerIds; - if (!queueWorkerIndexMap.TryGetValue(worker.QueueName, out workerIds)) - { - queueWorkerIndexMap[worker.QueueName] = new[] { i }; - } - else - { - workerIds = new List(workerIds) { i }.ToArray(); - queueWorkerIndexMap[worker.QueueName] = workerIds; - } - } - } - } - - public void Start() - { - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Started) - { - //Start any stopped worker threads - StartWorkerThreads(); - return; - } - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Disposed) - throw new ObjectDisposedException("MQ Host has been disposed"); - - //Only 1 thread allowed past - if (Interlocked.CompareExchange(ref status, WorkerStatus.Starting, WorkerStatus.Stopped) == WorkerStatus.Stopped) //Should only be 1 thread past this point - { - try - { - Init(); - - if (workers == null || workers.Length == 0) - { - Log.Warn("Cannot start a MQ Server with no Message Handlers registered, ignoring."); - Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Starting); - return; - } - - foreach (var worker in workers) - { - worker.Start(); - } - - SleepBackOffMultiplier(Interlocked.CompareExchange(ref noOfContinuousErrors, 0, 0)); - - KillBgThreadIfExists(); - - bgThread = new Thread(RunLoop) { - IsBackground = true, - Name = "Redis MQ Server " + Interlocked.Increment(ref bgThreadCount) - }; - bgThread.Start(); - Log.Debug("Started Background Thread: " + bgThread.Name); - - StartWorkerThreads(); - } - catch (Exception ex) - { - if (this.ErrorHandler != null) this.ErrorHandler(ex); - } - } - } - - private void RunLoop() - { - if (Interlocked.CompareExchange(ref status, WorkerStatus.Started, WorkerStatus.Starting) != WorkerStatus.Starting) return; - Interlocked.Increment(ref timesStarted); - - try - { - using (var redisClient = clientsManager.GetReadOnlyClient()) - { - //Record that we had a good run... - Interlocked.CompareExchange(ref noOfContinuousErrors, 0, noOfContinuousErrors); - - using (var subscription = redisClient.CreateSubscription()) - { - subscription.OnUnSubscribe = channel => Log.Debug("OnUnSubscribe: " + channel); - - subscription.OnMessage = (channel, msg) => { - - if (msg == WorkerStatus.StopCommand) - { - Log.Debug("Stop Command Issued"); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Started) != WorkerStatus.Started) - Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Stopping); - - Log.Debug("UnSubscribe From All Channels..."); - subscription.UnSubscribeFromAllChannels(); //Un block thread. 
- return; - } - - if (!string.IsNullOrEmpty(msg)) - { - int[] workerIndexes; - if (queueWorkerIndexMap.TryGetValue(msg, out workerIndexes)) - { - foreach (var workerIndex in workerIndexes) - { - workers[workerIndex].NotifyNewMessage(); - } - } - } - }; - - subscription.SubscribeToChannels(QueueNames.TopicIn); //blocks thread - } - - StopWorkerThreads(); - } - } - catch (Exception ex) - { - lastExMsg = ex.Message; - Interlocked.Increment(ref noOfErrors); - Interlocked.Increment(ref noOfContinuousErrors); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Started) != WorkerStatus.Started) - Interlocked.CompareExchange(ref status, WorkerStatus.Stopped, WorkerStatus.Stopping); - - StopWorkerThreads(); - - if (this.ErrorHandler != null) - this.ErrorHandler(ex); - } - } - - public void Stop() - { - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Disposed) - throw new ObjectDisposedException("MQ Host has been disposed"); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Stopping, WorkerStatus.Started) == WorkerStatus.Started) - { - Log.Debug("Stopping MQ Host..."); - - //Unblock current bgthread by issuing StopCommand - try - { - using (var redis = clientsManager.GetClient()) - { - redis.PublishMessage(QueueNames.TopicIn, WorkerStatus.StopCommand); - } - } - catch (Exception ex) - { - if (this.ErrorHandler != null) this.ErrorHandler(ex); - Log.Warn("Could not send STOP message to bg thread: " + ex.Message); - } - } - } - - public void NotifyAll() - { - Log.Debug("Notifying all worker threads to check for new messages..."); - foreach (var worker in workers) - { - worker.NotifyNewMessage(); - } - } - - public void StartWorkerThreads() - { - Log.Debug("Starting all Redis MQ Server worker threads..."); - Array.ForEach(workers, x => x.Start()); - } - - public void ForceRestartWorkerThreads() - { - Log.Debug("ForceRestart all Redis MQ Server worker threads..."); - Array.ForEach(workers, x => x.ForceRestart()); - } - - public void StopWorkerThreads() - { - Log.Debug("Stopping all Redis MQ Server worker threads..."); - Array.ForEach(workers, x => x.Stop()); - } - - void DisposeWorkerThreads() - { - Log.Debug("Disposing all Redis MQ Server worker threads..."); - if (workers != null) Array.ForEach(workers, x => x.Dispose()); - } - - void WorkerErrorHandler(MessageHandlerWorker source, Exception ex) - { - Log.Error("Received exception in Worker: " + source.QueueName, ex); - for (int i = 0; i < workers.Length; i++) - { - var worker = workers[i]; - if (worker == source) - { - Log.Debug("Starting new {0} Worker at index {1}...".Fmt(source.QueueName, i)); - workers[i] = source.Clone(); - workers[i].Start(); - worker.Dispose(); - return; - } - } - } - - private void KillBgThreadIfExists() - { - if (bgThread != null && bgThread.IsAlive) - { - //give it a small chance to die gracefully - if (!bgThread.Join(500)) - { - //Ideally we shouldn't get here, but lets try our hardest to clean it up - Log.Warn("Interrupting previous Background Thread: " + bgThread.Name); - bgThread.Interrupt(); - if (!bgThread.Join(TimeSpan.FromSeconds(3))) - { - Log.Warn(bgThread.Name + " just wont die, so we're now aborting it..."); - bgThread.Abort(); - } - } - bgThread = null; - } - } - - readonly Random rand = new Random(Environment.TickCount); - private void SleepBackOffMultiplier(int continuousErrorsCount) - { - if (continuousErrorsCount == 0) return; - const int MaxSleepMs = 60 * 1000; - - //exponential/random retry back-off. 
- var nextTry = Math.Min( - rand.Next((int)Math.Pow(continuousErrorsCount, 3), (int)Math.Pow(continuousErrorsCount + 1, 3) + 1), - MaxSleepMs); - - Log.Debug("Sleeping for {0}ms after {1} continuous errors".Fmt(nextTry, continuousErrorsCount)); - - Thread.Sleep(nextTry); - } - - public virtual void Dispose() - { - if (Interlocked.CompareExchange(ref status, 0, 0) == WorkerStatus.Disposed) - return; - - Stop(); - - if (Interlocked.CompareExchange(ref status, WorkerStatus.Disposed, WorkerStatus.Stopped) != WorkerStatus.Stopped) - Interlocked.CompareExchange(ref status, WorkerStatus.Disposed, WorkerStatus.Stopping); - - try - { - DisposeWorkerThreads(); - } - catch (Exception ex) - { - Log.Error("Error DisposeWorkerThreads(): ", ex); - } - - try - { - Thread.Sleep(100); //give it a small chance to die gracefully - KillBgThreadIfExists(); - } - catch (Exception ex) - { - if (this.ErrorHandler != null) this.ErrorHandler(ex); - } - } - - public string GetStatus() - { - switch (Interlocked.CompareExchange(ref status, 0, 0)) - { - case WorkerStatus.Disposed: - return "Disposed"; - case WorkerStatus.Stopped: - return "Stopped"; - case WorkerStatus.Stopping: - return "Stopping"; - case WorkerStatus.Starting: - return "Starting"; - case WorkerStatus.Started: - return "Started"; - } - return null; - } - - public IMessageHandlerStats GetStats() - { - lock (workers) - { - var total = new MessageHandlerStats("All Handlers"); - workers.ToList().ForEach(x => total.Add(x.GetStats())); - return total; - } - } - - public string GetStatsDescription() - { - lock (workers) - { - var sb = new StringBuilder("#MQ SERVER STATS:\n"); - sb.AppendLine("==============="); - sb.AppendLine("Current Status: " + GetStatus()); - sb.AppendLine("Listening On: " + string.Join(", ", workers.ToList().ConvertAll(x => x.QueueName).ToArray())); - sb.AppendLine("Times Started: " + Interlocked.CompareExchange(ref timesStarted, 0, 0)); - sb.AppendLine("Num of Errors: " + Interlocked.CompareExchange(ref noOfErrors, 0, 0)); - sb.AppendLine("Num of Continuous Errors: " + Interlocked.CompareExchange(ref noOfContinuousErrors, 0, 0)); - sb.AppendLine("Last ErrorMsg: " + lastExMsg); - sb.AppendLine("==============="); - foreach (var worker in workers) - { - sb.AppendLine(worker.GetStats().ToString()); - sb.AppendLine("---------------\n"); - } - return sb.ToString(); - } - } - - public List WorkerThreadsStatus() - { - return workers.ToList().ConvertAll(x => x.GetStatus()); - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Messaging/RedisTransientMessageFactory.cs b/src/ServiceStack.Redis/Messaging/RedisTransientMessageFactory.cs deleted file mode 100644 index af50e922..00000000 --- a/src/ServiceStack.Redis/Messaging/RedisTransientMessageFactory.cs +++ /dev/null @@ -1,87 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using ServiceStack.Messaging; - -namespace ServiceStack.Redis.Messaging -{ - /// - /// Transient message queues are a one-pass message queue service that starts - /// processing messages when Start() is called. Any subsequent Start() calls - /// while the service is running is ignored. 
- /// - /// The transient service will continue to run until all messages have been - /// processed after which time it will shutdown all processing until Start() is called again. - /// - public class RedisTransientMessageFactory - : IMessageFactory - { - public IRedisClientsManager ClientsManager { get; private set; } - - public RedisTransientMessageService MessageService { get; private set; } - - public RedisTransientMessageFactory( - IRedisClientsManager clientsManager) - : this(2, null, clientsManager) - { - } - - public RedisTransientMessageFactory(int retryAttempts, TimeSpan? requestTimeOut, - IRedisClientsManager clientsManager) - { - this.ClientsManager = clientsManager ?? new BasicRedisClientManager(); - MessageService = new RedisTransientMessageService( - retryAttempts, requestTimeOut, this); - } - - public IMessageQueueClient CreateMessageQueueClient() - { - return new RedisMessageQueueClient(this.ClientsManager, OnMessagePublished); - } - - public IMessageProducer CreateMessageProducer() - { - return new RedisMessageProducer(this.ClientsManager, OnMessagePublished); - } - - public IMessageService CreateMessageService() - { - return MessageService; - } - - - public void OnMessagePublished() - { - if (this.MessageService != null) - { - this.MessageService.Start(); - } - } - - public void Dispose() - { - if (this.MessageService != null) - { - this.MessageService.Dispose(); - this.MessageService = null; - } - - if (this.ClientsManager != null) - { - this.ClientsManager.Dispose(); - this.ClientsManager = null; - } - } - - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Messaging/RedisTransientMessageService.cs b/src/ServiceStack.Redis/Messaging/RedisTransientMessageService.cs deleted file mode 100644 index 638d0de7..00000000 --- a/src/ServiceStack.Redis/Messaging/RedisTransientMessageService.cs +++ /dev/null @@ -1,38 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using ServiceStack.Common.Extensions; -using ServiceStack.Messaging; - -namespace ServiceStack.Redis.Messaging -{ - public class RedisTransientMessageService - : TransientMessageServiceBase - { - private readonly RedisTransientMessageFactory messageFactory; - - public RedisTransientMessageService(int retryAttempts, TimeSpan? 
requestTimeOut, - RedisTransientMessageFactory messageFactory) - : base(retryAttempts, requestTimeOut) - { - messageFactory.ThrowIfNull("messageFactory"); - this.messageFactory = messageFactory; - } - - public override IMessageFactory MessageFactory - { - get { return messageFactory; } - } - } - -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Messaging/WorkerStatus.cs b/src/ServiceStack.Redis/Messaging/WorkerStatus.cs deleted file mode 100644 index 6f66d2cc..00000000 --- a/src/ServiceStack.Redis/Messaging/WorkerStatus.cs +++ /dev/null @@ -1,32 +0,0 @@ -namespace ServiceStack.Redis.Messaging -{ - public static class WorkerStatus - { - public const int Disposed = -1; - public const int Stopped = 0; - public const int Stopping = 1; - public const int Starting = 2; - public const int Started = 3; - - //Control Commands - public const string StopCommand = "STOP"; - - public static string ToString(int workerStatus) - { - switch (workerStatus) - { - case Disposed: - return "Disposed"; - case Stopped: - return "Stopped"; - case Stopping: - return "Stopping"; - case Starting: - return "Starting"; - case Started: - return "Started"; - } - return "Unknown"; - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.Async.cs b/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.Async.cs new file mode 100644 index 00000000..fcadd82b --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.Async.cs @@ -0,0 +1,51 @@ +using System; +using System.Threading.Tasks; +using System.Collections.Generic; +using ServiceStack.Redis.Internal; + +namespace ServiceStack.Redis.Pipeline +{ + /// + /// A complete redis command, with method to send command, receive response, and run callback on success or failure + /// + internal partial class QueuedRedisCommand : RedisCommand + { + public override ValueTask ExecuteAsync(IRedisClientAsync client) + { + try + { + switch (AsyncReturnCommand) + { + case null: + ExecuteThrowIfSync(); + return default; + case Func VoidReturnCommandAsync: + return VoidReturnCommandAsync(client); + case Func> IntReturnCommandAsync: + return IntReturnCommandAsync(client).Await(); + case Func> LongReturnCommandAsync: + return LongReturnCommandAsync(client).Await(); + case Func> DoubleReturnCommandAsync: + return DoubleReturnCommandAsync(client).Await(); + case Func> BytesReturnCommandAsync: + return BytesReturnCommandAsync(client).Await(); + case Func> StringReturnCommandAsync: + return StringReturnCommandAsync(client).Await(); + case Func> MultiBytesReturnCommandAsync: + return MultiBytesReturnCommandAsync(client).Await(); + case Func>> MultiStringReturnCommandAsync: + return MultiStringReturnCommandAsync(client).Await(); + case object obj: + ExecuteThrowIfSync(); + // Execute only processes a limited number of patterns; we'll respect that here too + throw new InvalidOperationException("Command cannot be executed in this context: " + obj.GetType().FullName); + } + } + catch (Exception ex) + { + Log.Error(ex); + throw; + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.cs b/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.cs index 9b917148..238c9521 100644 --- a/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.cs +++ b/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.cs @@ -3,62 +3,64 @@ namespace ServiceStack.Redis.Pipeline { - /// - /// A complete redis command, with method to send command, receive response, and run callback on success or failure - /// - 
internal class QueuedRedisCommand : RedisCommand - { - public override void Execute(IRedisClient client) - { - try - { - if (VoidReturnCommand != null) - { - VoidReturnCommand(client); + /// + /// A complete redis command, with method to send command, receive response, and run callback on success or failure + /// + internal partial class QueuedRedisCommand : RedisCommand + { + public override void Execute(IRedisClient client) + { + try + { + if (VoidReturnCommand != null) + { + VoidReturnCommand(client); - } - else if (IntReturnCommand != null) - { - IntReturnCommand(client); + } + else if (IntReturnCommand != null) + { + IntReturnCommand(client); - } - else if (LongReturnCommand != null) - { - LongReturnCommand(client); + } + else if (LongReturnCommand != null) + { + LongReturnCommand(client); - } - else if (DoubleReturnCommand != null) - { - DoubleReturnCommand(client); + } + else if (DoubleReturnCommand != null) + { + DoubleReturnCommand(client); - } - else if (BytesReturnCommand != null) - { - BytesReturnCommand(client); + } + else if (BytesReturnCommand != null) + { + BytesReturnCommand(client); - } - else if (StringReturnCommand != null) - { - StringReturnCommand(client); + } + else if (StringReturnCommand != null) + { + StringReturnCommand(client); - } - else if (MultiBytesReturnCommand != null) - { - MultiBytesReturnCommand(client); + } + else if (MultiBytesReturnCommand != null) + { + MultiBytesReturnCommand(client); - } - else if (MultiStringReturnCommand != null) - { - MultiStringReturnCommand(client); - - } - } - catch (Exception ex) - { - Log.Error(ex); - throw; - } - - } - } + } + else if (MultiStringReturnCommand != null) + { + MultiStringReturnCommand(client); + } + else + { + ExecuteThrowIfAsync(); + } + } + catch (Exception ex) + { + Log.Error(ex); + throw; + } + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.Async.cs b/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.Async.cs new file mode 100644 index 00000000..e1779605 --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.Async.cs @@ -0,0 +1,159 @@ +using System; +using System.Collections.Generic; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Pipeline +{ + internal partial class QueuedRedisOperation + { + public virtual ValueTask ExecuteAsync(IRedisClientAsync client) => default; + + private Delegate _asyncReadCommand; + private QueuedRedisOperation SetAsyncReadCommand(Delegate value) + { + if (_asyncReadCommand is object && _asyncReadCommand != value) + throw new InvalidOperationException("Only a single async read command can be assigned"); + _asyncReadCommand = value; + return this; + } + + internal QueuedRedisOperation WithAsyncReadCommand(Func VoidReadCommandAsync) + => SetAsyncReadCommand(VoidReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> IntReadCommandAsync) + => SetAsyncReadCommand(IntReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> LongReadCommandAsync) + => SetAsyncReadCommand(LongReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> BoolReadCommandAsync) + => SetAsyncReadCommand(BoolReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> BytesReadCommandAsync) + => SetAsyncReadCommand(BytesReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> MultiBytesReadCommandAsync) + => SetAsyncReadCommand(MultiBytesReadCommandAsync); + internal 
QueuedRedisOperation WithAsyncReadCommand(Func> StringReadCommandAsync) + => SetAsyncReadCommand(StringReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func>> MultiStringReadCommandAsync) + => SetAsyncReadCommand(MultiStringReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func>> DictionaryStringReadCommandAsync) + => SetAsyncReadCommand(DictionaryStringReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> DoubleReadCommandAsync) + => SetAsyncReadCommand(DoubleReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> RedisDataReadCommandAsync) + => SetAsyncReadCommand(RedisDataReadCommandAsync); + + public async ValueTask ProcessResultAsync(CancellationToken token) + { + try + { + switch (_asyncReadCommand) + { + case null: + ProcessResultThrowIfSync(); + break; + case Func VoidReadCommandAsync: + await VoidReadCommandAsync(token).ConfigureAwait(false); + OnSuccessVoidCallback?.Invoke(); + break; + case Func> IntReadCommandAsync: + var i32 = await IntReadCommandAsync(token).ConfigureAwait(false); + OnSuccessIntCallback?.Invoke(i32); + OnSuccessLongCallback?.Invoke(i32); + OnSuccessBoolCallback?.Invoke(i32 == RedisNativeClient.Success); + OnSuccessVoidCallback?.Invoke(); + break; + case Func> LongReadCommandAsync: + var i64 = await LongReadCommandAsync(token).ConfigureAwait(false); + OnSuccessIntCallback?.Invoke((int)i64); + OnSuccessLongCallback?.Invoke(i64); + OnSuccessBoolCallback?.Invoke(i64 == RedisNativeClient.Success); + OnSuccessVoidCallback?.Invoke(); + break; + case Func> DoubleReadCommandAsync: + var f64 = await DoubleReadCommandAsync(token).ConfigureAwait(false); + OnSuccessDoubleCallback?.Invoke(f64); + break; + case Func> BytesReadCommandAsync: + var bytes = await BytesReadCommandAsync(token).ConfigureAwait(false); + if (bytes != null && bytes.Length == 0) bytes = null; + OnSuccessBytesCallback?.Invoke(bytes); + OnSuccessStringCallback?.Invoke(bytes != null ? Encoding.UTF8.GetString(bytes) : null); + OnSuccessTypeCallback?.Invoke(bytes != null ? Encoding.UTF8.GetString(bytes) : null); + OnSuccessIntCallback?.Invoke(bytes != null ? 
int.Parse(Encoding.UTF8.GetString(bytes)) : 0); + OnSuccessBoolCallback?.Invoke(bytes != null && Encoding.UTF8.GetString(bytes) == "OK"); + break; + case Func> StringReadCommandAsync: + var s = await StringReadCommandAsync(token).ConfigureAwait(false); + OnSuccessStringCallback?.Invoke(s); + OnSuccessTypeCallback?.Invoke(s); + break; + case Func> MultiBytesReadCommandAsync: + var multiBytes = await MultiBytesReadCommandAsync(token).ConfigureAwait(false); + OnSuccessMultiBytesCallback?.Invoke(multiBytes); + OnSuccessMultiStringCallback?.Invoke(multiBytes?.ToStringList()); + OnSuccessMultiTypeCallback?.Invoke(multiBytes.ToStringList()); + OnSuccessDictionaryStringCallback?.Invoke(multiBytes.ToStringDictionary()); + break; + case Func>> MultiStringReadCommandAsync: + var multiString = await MultiStringReadCommandAsync(token).ConfigureAwait(false); + OnSuccessMultiStringCallback?.Invoke(multiString); + break; + case Func> RedisDataReadCommandAsync: + var data = await RedisDataReadCommandAsync(token).ConfigureAwait(false); + OnSuccessRedisTextCallback?.Invoke(data.ToRedisText()); + OnSuccessRedisDataCallback?.Invoke(data); + break; + case Func> BoolReadCommandAsync: + var b = await BoolReadCommandAsync(token).ConfigureAwait(false); + OnSuccessBoolCallback?.Invoke(b); + break; + case Func>> DictionaryStringReadCommandAsync: + var dict = await DictionaryStringReadCommandAsync(token).ConfigureAwait(false); + OnSuccessDictionaryStringCallback?.Invoke(dict); + break; + default: + ProcessResultThrowIfSync(); + break; + } + } + catch (Exception ex) + { + Log.Error(ex); + + if (OnErrorCallback != null) + { + OnErrorCallback(ex); + } + else + { + throw; + } + } + } + + partial void OnProcessResultThrowIfAsync() + { + if (_asyncReadCommand is object) + { + throw new InvalidOperationException("An async read command was present, but the queued operation is being processed synchronously"); + } + } + private void ProcessResultThrowIfSync() + { + if (VoidReadCommand is object + || IntReadCommand is object + || LongReadCommand is object + || BoolReadCommand is object + || BytesReadCommand is object + || MultiBytesReadCommand is object + || StringReadCommand is object + || MultiBytesReadCommand is object + || DictionaryStringReadCommand is object + || DoubleReadCommand is object + || RedisDataReadCommand is object) + { + throw new InvalidOperationException("A sync read command was present, but the queued operation is being processed asynchronously"); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.cs b/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.cs index c5ec8acc..3a45d25b 100644 --- a/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.cs +++ b/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.cs @@ -5,171 +5,143 @@ namespace ServiceStack.Redis.Pipeline { - internal class QueuedRedisOperation - { - protected static readonly ILog Log = LogManager.GetLogger(typeof(QueuedRedisOperation)); + internal partial class QueuedRedisOperation + { + protected static readonly ILog Log = LogManager.GetLogger(typeof(QueuedRedisOperation)); - public Action VoidReadCommand { get; set; } - public Func IntReadCommand { get; set; } - public Func LongReadCommand { get; set; } - public Func BoolReadCommand { get; set; } - public Func BytesReadCommand { get; set; } - public Func MultiBytesReadCommand { get; set; } - public Func StringReadCommand { get; set; } - public Func> MultiStringReadCommand { get; set; } - public Func DoubleReadCommand { get; set; } + 
public Action VoidReadCommand { get; set; } + public Func IntReadCommand { get; set; } + public Func LongReadCommand { get; set; } + public Func BoolReadCommand { get; set; } + public Func BytesReadCommand { get; set; } + public Func MultiBytesReadCommand { get; set; } + public Func StringReadCommand { get; set; } + public Func> MultiStringReadCommand { get; set; } + public Func> DictionaryStringReadCommand { get; set; } + public Func DoubleReadCommand { get; set; } + public Func RedisDataReadCommand { get; set; } - public Action OnSuccessVoidCallback { get; set; } - public Action OnSuccessIntCallback { get; set; } - public Action OnSuccessLongCallback { get; set; } - public Action OnSuccessBoolCallback { get; set; } - public Action OnSuccessBytesCallback { get; set; } - public Action OnSuccessMultiBytesCallback { get; set; } - public Action OnSuccessStringCallback { get; set; } - public Action> OnSuccessMultiStringCallback { get; set; } - public Action OnSuccessDoubleCallback { get; set; } - - public Action OnSuccessTypeCallback { get; set; } - public Action> OnSuccessMultiTypeCallback { get; set; } + public Action OnSuccessVoidCallback { get; set; } + public Action OnSuccessIntCallback { get; set; } + public Action OnSuccessLongCallback { get; set; } + public Action OnSuccessBoolCallback { get; set; } + public Action OnSuccessBytesCallback { get; set; } + public Action OnSuccessMultiBytesCallback { get; set; } + public Action OnSuccessStringCallback { get; set; } + public Action> OnSuccessMultiStringCallback { get; set; } + public Action> OnSuccessDictionaryStringCallback { get; set; } + public Action OnSuccessRedisDataCallback { get; set; } + public Action OnSuccessRedisTextCallback { get; set; } + public Action OnSuccessDoubleCallback { get; set; } - public Action OnErrorCallback { get; set; } + public Action OnSuccessTypeCallback { get; set; } + public Action> OnSuccessMultiTypeCallback { get; set; } - public virtual void Execute(IRedisClient client) - { - - } + public Action OnErrorCallback { get; set; } - public void ProcessResult() - { - try - { - if (VoidReadCommand != null) - { - VoidReadCommand(); - if (OnSuccessVoidCallback != null) - { - OnSuccessVoidCallback(); - } - } - else if (IntReadCommand != null) - { - var result = IntReadCommand(); - if (OnSuccessIntCallback != null) - { - OnSuccessIntCallback(result); - } - if (OnSuccessLongCallback != null) - { - OnSuccessLongCallback(result); - } - if (OnSuccessBoolCallback != null) - { - var success = result == RedisNativeClient.Success; - OnSuccessBoolCallback(success); - } - if (OnSuccessVoidCallback != null) - { - OnSuccessVoidCallback(); - } - } - else if (LongReadCommand != null) - { - var result = LongReadCommand(); - if (OnSuccessLongCallback != null) - { - OnSuccessLongCallback(result); - } - if (OnSuccessBoolCallback != null) - { - var success = result == RedisNativeClient.Success; - OnSuccessBoolCallback(success); - } - if (OnSuccessVoidCallback != null) - { - OnSuccessVoidCallback(); - } - } - else if (DoubleReadCommand != null) - { - var result = DoubleReadCommand(); - if (OnSuccessDoubleCallback != null) - { - OnSuccessDoubleCallback(result); - } - } - else if (BytesReadCommand != null) - { - var result = BytesReadCommand(); - if (OnSuccessBytesCallback != null) - { - OnSuccessBytesCallback(result); - } - if (OnSuccessStringCallback != null) - { - OnSuccessStringCallback(result != null ? Encoding.UTF8.GetString(result) : null); - } - if (OnSuccessTypeCallback != null) - { - OnSuccessTypeCallback(result != null ? 
Encoding.UTF8.GetString(result) : null); - } - if (OnSuccessIntCallback != null) - { - OnSuccessIntCallback(result != null ? int.Parse(Encoding.UTF8.GetString(result)) : 0); - } - } - else if (StringReadCommand != null) - { - var result = StringReadCommand(); - if (OnSuccessStringCallback != null) - { - OnSuccessStringCallback(result); - } - if (OnSuccessTypeCallback != null) - { - OnSuccessTypeCallback(result); - } - } - else if (MultiBytesReadCommand != null) - { - var result = MultiBytesReadCommand(); - if (OnSuccessMultiBytesCallback != null) - { - OnSuccessMultiBytesCallback(result); - } - if (OnSuccessMultiStringCallback != null) - { - OnSuccessMultiStringCallback(result != null ? result.ToStringList() : null); - } - - if (OnSuccessMultiTypeCallback != null) - { - OnSuccessMultiTypeCallback(result.ToStringList()); - } - - } - else if (MultiStringReadCommand != null) - { - var result = MultiStringReadCommand(); - if (OnSuccessMultiStringCallback != null) - { - OnSuccessMultiStringCallback(result); - } - } - } - catch (Exception ex) - { - Log.Error(ex); + public virtual void Execute(IRedisClient client) + { - if (OnErrorCallback != null) - { - OnErrorCallback(ex); - } - else - { - throw; - } - } - } + } - } + public void ProcessResult() + { + try + { + if (VoidReadCommand != null) + { + VoidReadCommand(); + OnSuccessVoidCallback?.Invoke(); + } + else if (IntReadCommand != null) + { + var result = IntReadCommand(); + OnSuccessIntCallback?.Invoke(result); + OnSuccessLongCallback?.Invoke(result); + OnSuccessBoolCallback?.Invoke(result == RedisNativeClient.Success); + OnSuccessVoidCallback?.Invoke(); + } + else if (LongReadCommand != null) + { + var result = LongReadCommand(); + OnSuccessIntCallback?.Invoke((int)result); + OnSuccessLongCallback?.Invoke(result); + OnSuccessBoolCallback?.Invoke(result == RedisNativeClient.Success); + OnSuccessVoidCallback?.Invoke(); + } + else if (DoubleReadCommand != null) + { + var result = DoubleReadCommand(); + OnSuccessDoubleCallback?.Invoke(result); + } + else if (BytesReadCommand != null) + { + var result = BytesReadCommand(); + if (result != null && result.Length == 0) + result = null; + + OnSuccessBytesCallback?.Invoke(result); + OnSuccessStringCallback?.Invoke(result != null ? Encoding.UTF8.GetString(result) : null); + OnSuccessTypeCallback?.Invoke(result != null ? Encoding.UTF8.GetString(result) : null); + OnSuccessIntCallback?.Invoke(result != null ? int.Parse(Encoding.UTF8.GetString(result)) : 0); + OnSuccessBoolCallback?.Invoke(result != null && Encoding.UTF8.GetString(result) == "OK"); + } + else if (StringReadCommand != null) + { + var result = StringReadCommand(); + OnSuccessStringCallback?.Invoke(result); + OnSuccessTypeCallback?.Invoke(result); + } + else if (MultiBytesReadCommand != null) + { + var result = MultiBytesReadCommand(); + OnSuccessMultiBytesCallback?.Invoke(result); + OnSuccessMultiStringCallback?.Invoke(result != null ? 
result.ToStringList() : null); + OnSuccessMultiTypeCallback?.Invoke(result.ToStringList()); + OnSuccessDictionaryStringCallback?.Invoke(result.ToStringDictionary()); + } + else if (MultiStringReadCommand != null) + { + var result = MultiStringReadCommand(); + OnSuccessMultiStringCallback?.Invoke(result); + } + else if (RedisDataReadCommand != null) + { + var data = RedisDataReadCommand(); + OnSuccessRedisTextCallback?.Invoke(data.ToRedisText()); + OnSuccessRedisDataCallback?.Invoke(data); + } + else if (BoolReadCommand != null) + { + var result = BoolReadCommand(); + OnSuccessBoolCallback?.Invoke(result); + } + else if (DictionaryStringReadCommand != null) + { + var result = DictionaryStringReadCommand(); + OnSuccessDictionaryStringCallback?.Invoke(result); + } + else + { + ProcessResultThrowIfAsync(); + } + } + catch (Exception ex) + { + Log.Error(ex); + + if (OnErrorCallback != null) + { + OnErrorCallback(ex); + } + else + { + throw; + } + } + } + + protected void ProcessResultThrowIfAsync() => OnProcessResultThrowIfAsync(); + partial void OnProcessResultThrowIfAsync(); + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.Async.cs b/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.Async.cs new file mode 100644 index 00000000..992bb480 --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.Async.cs @@ -0,0 +1,330 @@ +using ServiceStack.Redis.Pipeline; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + + public partial class RedisAllPurposePipeline : IRedisPipelineAsync + { + private IRedisPipelineAsync AsAsync() => this; + + private protected virtual async ValueTask ReplayAsync(CancellationToken token) + { + Init(); + await ExecuteAsync().ConfigureAwait(false); + await AsAsync().FlushAsync(token).ConfigureAwait(false); + return true; + } + + protected async ValueTask ExecuteAsync() + { + int count = QueuedCommands.Count; + for (int i = 0; i < count; ++i) + { + var op = QueuedCommands[0]; + QueuedCommands.RemoveAt(0); + await op.ExecuteAsync(RedisClient).ConfigureAwait(false); + QueuedCommands.Add(op); + } + } + + ValueTask IRedisPipelineSharedAsync.ReplayAsync(CancellationToken token) + => ReplayAsync(token); + + async ValueTask IRedisPipelineSharedAsync.FlushAsync(CancellationToken token) + { + // flush send buffers + await RedisClient.FlushSendBufferAsync(token).ConfigureAwait(false); + RedisClient.ResetSendBuffer(); + + try + { + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + } + catch (Exception) + { + // The connection cannot be reused anymore. All queued commands have been sent to redis. Even if a new command is executed, the next response read from the + // network stream can be the response of one of the queued commands, depending on when the exception occurred. This response would be invalid for the new command. 
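                // A concrete (hypothetical) illustration of the desync described above, assuming three
                // queued async commands whose names follow the IRedisClientAsync surface:
                //   QueueCommand(r => r.SetValueAsync("a", "1"));   // reply 1
                //   QueueCommand(r => r.GetValueAsync("a"));        // reply 2
                //   QueueCommand(r => r.IncrementValueAsync("n"));  // reply 3
                // If reading reply 2 throws, reply 3 is still buffered on the socket; the next command
                // sent on this connection would consume reply 3 as its own response, so the connection
                // is disposed here instead of being returned to the pool.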
+ RedisClient.DisposeConnection(); + throw; + } + + ClosePipeline(); + } + + private protected virtual ValueTask DisposeAsync() + { + // don't need to send anything; just clean up + Dispose(); + return default; + } + + ValueTask IAsyncDisposable.DisposeAsync() => DisposeAsync(); + + internal static void AssertSync(ValueTask command) + { + if (!command.IsCompleted) + { + _ = ObserveAsync(command.AsTask()); + throw new InvalidOperationException($"The operations provided to {nameof(IRedisQueueableOperationAsync.QueueCommand)} should not perform asynchronous operations internally"); + } + // this serves two purposes: 1) surface any fault, and + // 2) ensure that if pooled (IValueTaskSource), it is reclaimed + _ = command.Result; + } + + internal static void AssertSync(ValueTask command) + { + if (!command.IsCompleted) + { + _ = ObserveAsync(command.AsTask()); + throw new InvalidOperationException($"The operations provided to {nameof(IRedisQueueableOperationAsync.QueueCommand)} should not perform asynchronous operations internally"); + } + // this serves two purposes: 1) surface any fault, and + // 2) ensure that if pooled (IValueTaskSource), it is reclaimed + command.GetAwaiter().GetResult(); + } + + static async Task ObserveAsync(Task task) // semantically this is "async void", but: some sync-contexts explode on that + { + // we've already thrown an exception via AssertSync; this + // just ensures that an "unobserved exception" doesn't fire + // as well + try { await task.ConfigureAwait(false); } + catch { } + } + + void IRedisQueueableOperationAsync.QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessVoidCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessIntCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessLongCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessBoolCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessDoubleCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessBytesCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void 
IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessMultiBytesCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessMultiStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToSet()), + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(async r => + { + var result = await command(r).ConfigureAwait(false); + return result.ToList(); + })); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessDictionaryStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessRedisDataCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessRedisTextCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueCompletableOperationAsync.CompleteMultiBytesQueuedCommandAsync(Func> multiBytesReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(multiBytesReadCommand); + AddCurrentQueuedOperation(); + } + + + void IRedisQueueCompletableOperationAsync.CompleteLongQueuedCommandAsync(Func> longReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(longReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteBytesQueuedCommandAsync(Func> bytesReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(bytesReadCommand); + AddCurrentQueuedOperation(); + } + + void 
IRedisQueueCompletableOperationAsync.CompleteVoidQueuedCommandAsync(Func voidReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(voidReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteStringQueuedCommandAsync(Func> stringReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(stringReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteDoubleQueuedCommandAsync(Func> doubleReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(doubleReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteIntQueuedCommandAsync(Func> intReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(intReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteMultiStringQueuedCommandAsync(Func>> multiStringReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(multiStringReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteRedisDataQueuedCommandAsync(Func> redisDataReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(redisDataReadCommand); + AddCurrentQueuedOperation(); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.cs b/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.cs index 1309d5ea..19b8fd1b 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.cs @@ -1,20 +1,21 @@ -using System; using ServiceStack.Redis.Pipeline; +using System; namespace ServiceStack.Redis { - public class RedisAllPurposePipeline: RedisCommandQueue, IRedisPipeline - { + public partial class RedisAllPurposePipeline : RedisCommandQueue, IRedisPipeline + { /// /// General purpose pipeline /// /// - public RedisAllPurposePipeline(RedisClient redisClient) : base(redisClient) - { + public RedisAllPurposePipeline(RedisClient redisClient) + : base(redisClient) + { Init(); - - } + + } protected virtual void Init() { @@ -24,25 +25,35 @@ protected virtual void Init() if (RedisClient.Pipeline != null) throw new InvalidOperationException("A pipeline is already in use"); - RedisClient.Pipeline = this; + RedisClient.Pipeline = this; } - + /// /// Flush send buffer, and read responses /// - public void Flush() - { - // flush send buffers - RedisClient.FlushSendBuffer(); - - //receive expected results - foreach (var queuedCommand in QueuedCommands) + public void Flush() + { + // flush send buffers + RedisClient.FlushAndResetSendBuffer(); + + try { - queuedCommand.ProcessResult(); + //receive expected results + foreach (var 
queuedCommand in QueuedCommands) + { + queuedCommand.ProcessResult(); + } + } + catch (Exception) + { + // The connection cannot be reused anymore. All queued commands have been sent to redis. Even if a new command is executed, the next response read from the + // network stream can be the response of one of the queued commands, depending on when the exception occurred. This response would be invalid for the new command. + RedisClient.DisposeConnection(); + throw; } - ClosePipeline(); - } + ClosePipeline(); + } protected void Execute() { @@ -56,23 +67,22 @@ protected void Execute() } } - public bool Replay() - { - Init(); - Execute(); - Flush(); - return true; - } + public virtual bool Replay() + { + Init(); + Execute(); + Flush(); + return true; + } - protected void ClosePipeline() + protected void ClosePipeline() { - RedisClient.ResetSendBuffer(); - RedisClient.Pipeline = null; + RedisClient.EndPipeline(); } - public void Dispose() + public virtual void Dispose() { ClosePipeline(); } - } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisCommand.Async.cs b/src/ServiceStack.Redis/Pipeline/RedisCommand.Async.cs new file mode 100644 index 00000000..0485af6f --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/RedisCommand.Async.cs @@ -0,0 +1,118 @@ +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + /// + /// Redis command that does not get queued + /// + internal partial class RedisCommand + { + private Delegate _asyncReturnCommand; + protected Delegate AsyncReturnCommand => _asyncReturnCommand; + private RedisCommand SetAsyncReturnCommand(Delegate value) + { + if (_asyncReturnCommand is object && _asyncReturnCommand != value) + throw new InvalidOperationException("Only a single async return command can be assigned"); + _asyncReturnCommand = value; + return this; + } + internal RedisCommand WithAsyncReturnCommand(Func VoidReturnCommandAsync) + => SetAsyncReturnCommand(VoidReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> IntReturnCommandAsync) + => SetAsyncReturnCommand(IntReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> LongReturnCommandAsync) + => SetAsyncReturnCommand(LongReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> BoolReturnCommandAsync) + => SetAsyncReturnCommand(BoolReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> BytesReturnCommandAsync) + => SetAsyncReturnCommand(BytesReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> MultiBytesReturnCommandAsync) + => SetAsyncReturnCommand(MultiBytesReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> StringReturnCommandAsync) + => SetAsyncReturnCommand(StringReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func>> MultiStringReturnCommandAsync) + => SetAsyncReturnCommand(MultiStringReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func>> DictionaryStringReturnCommandAsync) + => SetAsyncReturnCommand(DictionaryStringReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> RedisDataReturnCommandAsync) + => SetAsyncReturnCommand(RedisDataReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> RedisTextReturnCommandAsync) + => SetAsyncReturnCommand(RedisTextReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> DoubleReturnCommandAsync) + => 
SetAsyncReturnCommand(DoubleReturnCommandAsync); + + public override ValueTask ExecuteAsync(IRedisClientAsync client) + { + try + { + switch (_asyncReturnCommand) + { + case null: + ExecuteThrowIfSync(); + return default; + case Func VoidReturnCommandAsync: + return VoidReturnCommandAsync(client); + case Func> IntReturnCommandAsync: + return IntReturnCommandAsync(client).Await(); + case Func> LongReturnCommandAsync: + return LongReturnCommandAsync(client).Await(); + case Func> DoubleReturnCommandAsync: + return DoubleReturnCommandAsync(client).Await(); + case Func> BytesReturnCommandAsync: + return BytesReturnCommandAsync(client).Await(); + case Func> StringReturnCommandAsync: + return StringReturnCommandAsync(client).Await(); + case Func> MultiBytesReturnCommandAsync: + return MultiBytesReturnCommandAsync(client).Await(); + case Func>> MultiStringReturnCommandAsync: + return MultiStringReturnCommandAsync(client).Await(); + case Func>> DictionaryStringReturnCommandAsync: + return DictionaryStringReturnCommandAsync(client).Await(); + case Func> RedisDataReturnCommandAsync: + return RedisDataReturnCommandAsync(client).Await(); + case Func> RedisTextReturnCommandAsync: + return RedisTextReturnCommandAsync(client).Await(); + case Func> BoolReturnCommandAsync: + return BoolReturnCommandAsync(client).Await(); + case object obj: + ExecuteThrowIfSync(); + return default; + } + } + catch (Exception ex) + { + Log.Error(ex); + return default; // RedisCommand.Execute swallows here; we'll do the same + } + } + + partial void OnExecuteThrowIfAsync() + { + if (_asyncReturnCommand is object) + { + throw new InvalidOperationException("An async return command was present, but the queued operation is being processed synchronously"); + } + } + protected void ExecuteThrowIfSync() + { + if (VoidReturnCommand is object + || IntReturnCommand is object + || LongReturnCommand is object + || BoolReturnCommand is object + || BytesReturnCommand is object + || MultiBytesReturnCommand is object + || StringReturnCommand is object + || MultiStringReturnCommand is object + || DictionaryStringReturnCommand is object + || RedisDataReturnCommand is object + || RedisTextReturnCommand is object + || DoubleReturnCommand is object) + { + throw new InvalidOperationException("A sync return command was present, but the queued operation is being processed asynchronously"); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisCommand.cs b/src/ServiceStack.Redis/Pipeline/RedisCommand.cs index 3503c531..abe09a35 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisCommand.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisCommand.cs @@ -9,7 +9,7 @@ namespace ServiceStack.Redis /// /// Redis command that does not get queued /// - internal class RedisCommand : QueuedRedisOperation + internal partial class RedisCommand : QueuedRedisOperation { public Action VoidReturnCommand { get; set; } public Func IntReturnCommand { get; set; } @@ -19,6 +19,9 @@ internal class RedisCommand : QueuedRedisOperation public Func MultiBytesReturnCommand { get; set; } public Func StringReturnCommand { get; set; } public Func> MultiStringReturnCommand { get; set; } + public Func> DictionaryStringReturnCommand { get; set; } + public Func RedisDataReturnCommand { get; set; } + public Func RedisTextReturnCommand { get; set; } public Func DoubleReturnCommand { get; set; } public override void Execute(IRedisClient client) @@ -63,7 +66,26 @@ public override void Execute(IRedisClient client) else if (MultiStringReturnCommand != null) { 
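                // Note on the partial-class pairing above and below: the async half (RedisCommand.Async.cs)
                // falls through to ExecuteThrowIfSync() when only sync delegates are set, and this sync
                // Execute falls through to ExecuteThrowIfAsync() when only an async delegate is set, so a
                // command queued for one execution model fails fast with InvalidOperationException rather
                // than being silently skipped.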
MultiStringReturnCommand(client); - + } + else if (DictionaryStringReturnCommand != null) + { + DictionaryStringReturnCommand(client); + } + else if (RedisDataReturnCommand != null) + { + RedisDataReturnCommand(client); + } + else if (RedisTextReturnCommand != null) + { + RedisTextReturnCommand(client); + } + else if (BoolReturnCommand != null) + { + BoolReturnCommand(client); + } + else + { + ExecuteThrowIfAsync(); } } catch (Exception ex) @@ -71,5 +93,8 @@ public override void Execute(IRedisClient client) Log.Error(ex); } } + + protected void ExecuteThrowIfAsync() => OnExecuteThrowIfAsync(); + partial void OnExecuteThrowIfAsync(); } } diff --git a/src/ServiceStack.Redis/Pipeline/RedisCommandQueue.cs b/src/ServiceStack.Redis/Pipeline/RedisCommandQueue.cs index 890bfcb4..e21d42ab 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisCommandQueue.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisCommandQueue.cs @@ -1,17 +1,18 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; +using System.Linq; using ServiceStack.Redis.Pipeline; namespace ServiceStack.Redis @@ -19,9 +20,9 @@ namespace ServiceStack.Redis /// /// public class RedisCommandQueue : RedisQueueCompletableOperation - { + { protected readonly RedisClient RedisClient; - + public RedisCommandQueue(RedisClient redisClient) { this.RedisClient = redisClient; @@ -224,5 +225,93 @@ public virtual void QueueCommand(Func> command, Actio }); command(RedisClient); } + + + public void QueueCommand(Func> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func> command, Action> onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public virtual void QueueCommand(Func> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + MultiStringReturnCommand = r => command(r).ToList(), + OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToSet()), + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func> command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func> command, Action> onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + DictionaryStringReturnCommand = command, + OnSuccessDictionaryStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } + + + public void QueueCommand(Func command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + RedisDataReturnCommand = command, + OnSuccessRedisDataCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + 
} + + + public void QueueCommand(Func command) + { + QueueCommand(command, null, null); + } + + public void QueueCommand(Func command, Action onSuccessCallback) + { + QueueCommand(command, onSuccessCallback, null); + } + + public void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + RedisTextReturnCommand = command, + OnSuccessRedisTextCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }); + command(RedisClient); + } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.Async.cs b/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.Async.cs new file mode 100644 index 00000000..bea7be2f --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.Async.cs @@ -0,0 +1,32 @@ +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Pipeline +{ + partial class RedisPipelineCommand + { + internal async ValueTask> ReadAllAsIntsAsync(CancellationToken token) + { + var results = new List(); + while (cmdCount-- > 0) + { + results.Add(await client.ReadLongAsync(token).ConfigureAwait(false)); + } + + return results; + } + internal async ValueTask ReadAllAsIntsHaveSuccessAsync(CancellationToken token) + { + var allResults = await ReadAllAsIntsAsync(token).ConfigureAwait(false); + return allResults.All(x => x == RedisNativeClient.Success); + } + + internal ValueTask FlushAsync(CancellationToken token) + { + Flush(); + return default; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.cs b/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.cs index d3e52ff7..67b4cb69 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.cs @@ -3,42 +3,42 @@ namespace ServiceStack.Redis.Pipeline { - public class RedisPipelineCommand - { - private readonly RedisNativeClient client; - private int cmdCount; + public partial class RedisPipelineCommand + { + private readonly RedisNativeClient client; + private int cmdCount; - public RedisPipelineCommand(RedisNativeClient client) - { - this.client = client; - } + public RedisPipelineCommand(RedisNativeClient client) + { + this.client = client; + } - public void WriteCommand(params byte[][] cmdWithBinaryArgs) - { - client.WriteAllToSendBuffer(cmdWithBinaryArgs); - cmdCount++; - } + public void WriteCommand(params byte[][] cmdWithBinaryArgs) + { + client.WriteAllToSendBuffer(cmdWithBinaryArgs); + cmdCount++; + } - public List ReadAllAsInts() - { - var results = new List(); - while (cmdCount-- > 0) - { - results.Add(client.ReadInt()); - } + public List ReadAllAsInts() + { + var results = new List(); + while (cmdCount-- > 0) + { + results.Add(client.ReadLong()); + } - return results; - } + return results; + } - public bool ReadAllAsIntsHaveSuccess() - { - var allResults = ReadAllAsInts(); - return allResults.All(x => x == RedisNativeClient.Success); - } + public bool ReadAllAsIntsHaveSuccess() + { + var allResults = ReadAllAsInts(); + return allResults.All(x => x == RedisNativeClient.Success); + } - public void Flush() - { - client.FlushSendBuffer(); - } - } + public void Flush() + { + client.FlushAndResetSendBuffer(); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisQueueCompletableOperation.cs b/src/ServiceStack.Redis/Pipeline/RedisQueueCompletableOperation.cs index 
2029b73f..92b10529 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisQueueCompletableOperation.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisQueueCompletableOperation.cs @@ -7,7 +7,7 @@ namespace ServiceStack.Redis /// /// Redis operation (transaction/pipeline) that allows queued commands to be completed /// - public class RedisQueueCompletableOperation + public partial class RedisQueueCompletableOperation { internal readonly List QueuedCommands = new List(); @@ -43,7 +43,7 @@ public virtual void CompleteVoidQueuedCommand(Action voidReadCommand) AddCurrentQueuedOperation(); } - public virtual void CompleteIntQueuedCommand(Func intReadCommand) + public virtual void CompleteIntQueuedCommand(Func intReadCommand) { //AssertCurrentOperation(); // this can happen when replaying pipeline/transaction @@ -113,6 +113,15 @@ public virtual void CompleteDoubleQueuedCommand(Func doubleReadCommand) AddCurrentQueuedOperation(); } + public virtual void CompleteRedisDataQueuedCommand(Func redisDataReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.RedisDataReadCommand = redisDataReadCommand; + AddCurrentQueuedOperation(); + } } } diff --git a/src/ServiceStack.Redis/PooledRedisClientManager.Async.cs b/src/ServiceStack.Redis/PooledRedisClientManager.Async.cs new file mode 100644 index 00000000..79734a35 --- /dev/null +++ b/src/ServiceStack.Redis/PooledRedisClientManager.Async.cs @@ -0,0 +1,283 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Caching; +using ServiceStack.Redis.Internal; +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.AsyncEx; + +namespace ServiceStack.Redis +{ + public partial class PooledRedisClientManager + : IRedisClientsManagerAsync + { + /// + /// Use previous client resolving behavior + /// + public static bool UseGetClientBlocking = false; + + ValueTask IRedisClientsManagerAsync.GetCacheClientAsync(CancellationToken token) + => new RedisClientManagerCacheClient(this).AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetClientAsync(CancellationToken token) => UseGetClientBlocking + ? GetClientBlocking().AsValueTaskResult() + : GetClientAsync(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyCacheClientAsync(CancellationToken token) + => new RedisClientManagerCacheClient(this) { ReadOnly = true }.AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyClientAsync(CancellationToken token) => UseGetClientBlocking + ? GetReadOnlyClientBlocking().AsValueTaskResult() + : GetReadOnlyClientAsync(); + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + private AsyncManualResetEvent readAsyncEvent; + partial void PulseAllReadAsync() + { + readAsyncEvent?.Set(); + readAsyncEvent?.Reset(); + } + + private AsyncManualResetEvent writeAsyncEvent; + partial void PulseAllWriteAsync() + { + writeAsyncEvent?.Set(); + writeAsyncEvent?.Reset(); + } + + private async Task WaitForWriter(int msTimeout) + { + // If we're not doing async, no need to create this till we need it. 
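            // The ??= below allocates the event lazily on the first async wait, so the purely sync pool
            // path never pays for it. PulseAllWriteAsync above wakes waiters by calling Set() immediately
            // followed by Reset(): the Set releases every caller currently awaiting WaitAsync, and the
            // Reset leaves the event unsignalled for later waiters - effectively an async analogue of a
            // PulseAll-style wake-up on the pool.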
+ writeAsyncEvent ??= new AsyncManualResetEvent(false); + var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(msTimeout)); + try + { + await writeAsyncEvent.WaitAsync(cts.Token); + } + catch (OperationCanceledException) { return false; } + return true; + } + + private async ValueTask GetClientAsync() + { + try + { + var inactivePoolIndex = -1; + do + { + lock (writeClients) + { + AssertValidReadWritePool(); + + // If it's -1, then we want to try again after a delay of some kind. So if it's NOT negative one, process it... + if ((inactivePoolIndex = GetInActiveWriteClient(out var inActiveClient)) != -1) + { + //inActiveClient != null only for Valid InActive Clients + if (inActiveClient != null) + { + WritePoolIndex++; + inActiveClient.Activate(); + + InitClient(inActiveClient); + + return inActiveClient; + } + else + { + // Still need to be in lock for this! + break; + } + } + } + + if (PoolTimeout.HasValue) + { + // We have a timeout value set - so try to not wait longer than this. + if (!await WaitForWriter(PoolTimeout.Value)) + { + throw new TimeoutException(PoolTimeoutError); + } + } + else + { + // Wait forever, so just retry till we get one. + await WaitForWriter(RecheckPoolAfterMs); + } + } while (true); // Just keep repeating until we get a slot. + + //Reaches here when there's no Valid InActive Clients, but we have a slot for one! + try + { + //inactivePoolIndex = index of reservedSlot || index of invalid client + var existingClient = writeClients[inactivePoolIndex]; + if (existingClient != null && existingClient != reservedSlot && existingClient.HadExceptions) + { + RedisState.DeactivateClient(existingClient); + } + + var newClient = InitNewClient(RedisResolver.CreateMasterClient(inactivePoolIndex)); + + //Put all blocking I/O or potential Exceptions before lock + lock (writeClients) + { + //If existingClient at inactivePoolIndex changed (failover) return new client outside of pool + if (writeClients[inactivePoolIndex] != existingClient) + { + if (Log.IsDebugEnabled) + Log.Debug("writeClients[inactivePoolIndex] != existingClient: {0}".Fmt(writeClients[inactivePoolIndex])); + + return newClient; //return client outside of pool + } + + WritePoolIndex++; + writeClients[inactivePoolIndex] = newClient; + + return !AssertAccessOnlyOnSameThread + ? newClient + : newClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); + } + } + catch + { + //Revert free-slot for any I/O exceptions that can throw (before lock) + lock (writeClients) + { + writeClients[inactivePoolIndex] = null; //free slot + } + throw; + } + } + finally + { + RedisState.DisposeExpiredClients(); + } + } + + private async Task WaitForReader(int msTimeout) + { + // If we're not doing async, no need to create this till we need it. + readAsyncEvent ??= new AsyncManualResetEvent(false); + var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(msTimeout)); + try + { + await readAsyncEvent.WaitAsync(cts.Token); + } + catch (OperationCanceledException) { return false; } + return true; + } + + private async ValueTask GetReadOnlyClientAsync() + { + try + { + var inactivePoolIndex = -1; + do + { + lock (readClients) + { + AssertValidReadOnlyPool(); + + // If it's -1, then we want to try again after a delay of some kind. So if it's NOT negative one, process it... 
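                        // Three outcomes are possible here: -1 (no free or reclaimable slot, so fall through
                        // to the timed waits below and retry), an index plus a non-null client (a valid
                        // inactive client that is re-activated and returned straight from the pool), or an
                        // index plus a null client (a reserved/invalid slot was claimed; break out and create
                        // a replacement client for that slot outside the lock).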
+ if ((inactivePoolIndex = GetInActiveReadClient(out var inActiveClient)) != -1) + { + //inActiveClient != null only for Valid InActive Clients + if (inActiveClient != null) + { + ReadPoolIndex++; + inActiveClient.Activate(); + + InitClient(inActiveClient); + + return inActiveClient; + } + else + { + // Still need to be in lock for this! + break; + } + } + } + + if (PoolTimeout.HasValue) + { + // We have a timeout value set - so try to not wait longer than this. + if (!await WaitForReader(PoolTimeout.Value)) + { + throw new TimeoutException(PoolTimeoutError); + } + } + else + { + // Wait forever, so just retry till we get one. + await WaitForReader(RecheckPoolAfterMs); + } + } while (true); // Just keep repeating until we get a slot. + + //Reaches here when there's no Valid InActive Clients + try + { + //inactivePoolIndex = index of reservedSlot || index of invalid client + var existingClient = readClients[inactivePoolIndex]; + if (existingClient != null && existingClient != reservedSlot && existingClient.HadExceptions) + { + RedisState.DeactivateClient(existingClient); + } + + var newClient = InitNewClient(RedisResolver.CreateSlaveClient(inactivePoolIndex)); + + //Put all blocking I/O or potential Exceptions before lock + lock (readClients) + { + //If existingClient at inactivePoolIndex changed (failover) return new client outside of pool + if (readClients[inactivePoolIndex] != existingClient) + { + if (Log.IsDebugEnabled) + Log.Debug("readClients[inactivePoolIndex] != existingClient: {0}".Fmt(readClients[inactivePoolIndex])); + + Interlocked.Increment(ref RedisState.TotalClientsCreatedOutsidePool); + + //Don't handle callbacks for new client outside pool + newClient.ClientManager = null; + return newClient; //return client outside of pool + } + + ReadPoolIndex++; + readClients[inactivePoolIndex] = newClient; + return newClient; + } + } + catch + { + //Revert free-slot for any I/O exceptions that can throw + lock (readClients) + { + readClients[inactivePoolIndex] = null; //free slot + } + throw; + } + } + finally + { + RedisState.DisposeExpiredClients(); + } + } + + } + +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/PooledRedisClientManager.Disposable.cs b/src/ServiceStack.Redis/PooledRedisClientManager.Disposable.cs deleted file mode 100644 index 5aa85974..00000000 --- a/src/ServiceStack.Redis/PooledRedisClientManager.Disposable.cs +++ /dev/null @@ -1,50 +0,0 @@ -using System; - -namespace ServiceStack.Redis -{ - - public partial class PooledRedisClientManager: IRedisClientCacheManager - { - /// - /// Manage a client acquired from the PooledRedisClientManager - /// Dispose method will release the client back to the pool. 
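// Example (sketch): sending read traffic to the replica pool via the async API
// shown above; the key name is illustrative.
public static async Task<string> ReadGreetingAsync(IRedisClientsManagerAsync manager, CancellationToken token)
{
    // Read-only clients resolve against the replica (read-only) hosts.
    await using var redis = await manager.GetReadOnlyClientAsync(token);
    return await redis.GetValueAsync("greeting", token);
}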
- /// - public class DisposablePooledClient : IDisposable where T : RedisNativeClient - { - private T client; - private readonly PooledRedisClientManager clientManager; - - /// - /// wrap the acquired client - /// - /// - public DisposablePooledClient(PooledRedisClientManager clientManager) - { - this.clientManager = clientManager; - if (clientManager != null) - client = (T)clientManager.GetClient(); - } - - /// - /// access the wrapped client - /// - public T Client { get { return client; } } - - /// - /// release the wrapped client back to the pool - /// - public void Dispose() - { - if (client != null) - clientManager.DisposeClient(client); - client = null; - } - } - - public DisposablePooledClient GetDisposableClient() where T : RedisNativeClient - { - return new DisposablePooledClient(this); - } - - } -} diff --git a/src/ServiceStack.Redis/PooledRedisClientManager.ICacheClient.cs b/src/ServiceStack.Redis/PooledRedisClientManager.ICacheClient.cs deleted file mode 100644 index 7d46a6c9..00000000 --- a/src/ServiceStack.Redis/PooledRedisClientManager.ICacheClient.cs +++ /dev/null @@ -1,38 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using ServiceStack.CacheAccess; - -namespace ServiceStack.Redis -{ - /// - /// For interoperabilty GetCacheClient() and GetReadOnlyCacheClient() - /// return an ICacheClient wrapper around the redis manager which has the affect of calling - /// GetClient() for all write operations and GetReadOnlyClient() for the read ones. - /// - /// This works well for master-slave replication scenarios where you have - /// 1 master that replicates to multiple read slaves. - /// - public partial class PooledRedisClientManager - : IRedisClientCacheManager - { - public ICacheClient GetCacheClient() - { - return new RedisClientManagerCacheClient(this); - } - - public ICacheClient GetReadOnlyCacheClient() - { - return new RedisClientManagerCacheClient(this) { ReadOnly = true }; - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/PooledRedisClientManager.cs b/src/ServiceStack.Redis/PooledRedisClientManager.cs index 66afe20a..481db23a 100644 --- a/src/ServiceStack.Redis/PooledRedisClientManager.cs +++ b/src/ServiceStack.Redis/PooledRedisClientManager.cs @@ -1,419 +1,755 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; using System.Linq; using System.Threading; -using ServiceStack.CacheAccess; -using ServiceStack.Common.Web; +using ServiceStack.Caching; using ServiceStack.Logging; +using ServiceStack.Text; namespace ServiceStack.Redis { - /// - /// Provides thread-safe pooling of redis client connections. - /// Allows load-balancing of master-write and read-slave hosts, ideal for - /// 1 master and multiple replicated read slaves. 
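// Example (sketch): wiring up the pooled manager for the 1 master / N read-replica
// topology described above. Host addresses and timeout values are illustrative only.
var manager = new PooledRedisClientManager(
    readWriteHosts: new[] { "10.0.0.1:6379" },                     // master
    readOnlyHosts:  new[] { "10.0.0.2:6379", "10.0.0.3:6379" })    // read replicas
{
    PoolTimeout = 3000,     // ms to wait for a free pooled connection before TimeoutException
    ConnectTimeout = 1000,  // ms socket connect timeout applied to each client
};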
- /// - public partial class PooledRedisClientManager - : IRedisClientsManager - { - private static readonly ILog Log = LogManager.GetLogger(typeof(PooledRedisClientManager)); - - private const string PoolTimeoutError = - "Redis Timeout expired. The timeout period elapsed prior to obtaining a connection from the pool. This may have occurred because all pooled connections were in use."; - - protected readonly int PoolSizeMultiplier = 10; - public int? PoolTimeOut { get; set; } + /// + /// Provides thread-safe pooling of redis client connections. + /// Allows load-balancing of master-write and read-replica hosts, ideal for + /// 1 master and multiple replicated read replicas. + /// + public partial class PooledRedisClientManager + : IRedisClientsManager, IRedisFailover, IHandleClientDispose, IHasRedisResolver + { + private static readonly ILog Log = LogManager.GetLogger(typeof(PooledRedisClientManager)); + + private const string PoolTimeoutError = + "Redis Timeout expired. The timeout period elapsed prior to obtaining a connection from the pool. This may have occurred because all pooled connections were in use."; + + protected readonly int PoolSizeMultiplier = 20; + public int RecheckPoolAfterMs = 100; + public int? PoolTimeout { get; set; } public int? ConnectTimeout { get; set; } - public int? SocketSendTimeout { get; set; } - public int? SocketReceiveTimeout { get; set; } - + public int? SocketSendTimeout { get; set; } + public int? SocketReceiveTimeout { get; set; } + public int? IdleTimeOutSecs { get; set; } + public bool AssertAccessOnlyOnSameThread { get; set; } + /// /// Gets or sets object key prefix. /// public string NamespacePrefix { get; set; } - private List ReadWriteHosts { get; set; } - private List ReadOnlyHosts { get; set; } - - private RedisClient[] writeClients = new RedisClient[0]; - protected int WritePoolIndex; + public IRedisResolver RedisResolver { get; set; } + public List> OnFailover { get; private set; } - private RedisClient[] readClients = new RedisClient[0]; - protected int ReadPoolIndex; + private RedisClient[] writeClients = TypeConstants.EmptyArray; + protected int WritePoolIndex; - protected int RedisClientCounter = 0; + private RedisClient[] readClients = TypeConstants.EmptyArray; + protected int ReadPoolIndex; - protected RedisClientManagerConfig Config { get; set; } + protected int RedisClientCounter = 0; - public IRedisClientFactory RedisClientFactory { get; set; } + protected RedisClientManagerConfig Config { get; set; } - public int Db { get; private set; } + public long? 
Db { get; private set; } public Action ConnectionFilter { get; set; } - public PooledRedisClientManager() : this(RedisNativeClient.DefaultHost) { } + public PooledRedisClientManager() : this(RedisConfig.DefaultHost) { } - public PooledRedisClientManager(int poolSize, int poolTimeOutSeconds, params string[] readWriteHosts) - : this(readWriteHosts, readWriteHosts, null, RedisNativeClient.DefaultDb, poolSize, poolTimeOutSeconds) - { - } + public PooledRedisClientManager(int poolSize, int poolTimeOutSeconds, params string[] readWriteHosts) + : this(readWriteHosts, readWriteHosts, null, null, poolSize, poolTimeOutSeconds) + { + } - public PooledRedisClientManager(int initialDb, params string[] readWriteHosts) - : this(readWriteHosts, readWriteHosts, initialDb) {} + public PooledRedisClientManager(long initialDb, params string[] readWriteHosts) + : this(readWriteHosts, readWriteHosts, initialDb) { } public PooledRedisClientManager(params string[] readWriteHosts) : this(readWriteHosts, readWriteHosts) { } - public PooledRedisClientManager(IEnumerable readWriteHosts, IEnumerable readOnlyHosts) - : this(readWriteHosts, readOnlyHosts, null) - { - } - - /// - /// Hosts can be an IP Address or Hostname in the format: host[:port] - /// e.g. 127.0.0.1:6379 - /// default is: localhost:6379 - /// - /// The write hosts. - /// The read hosts. - /// The config. - public PooledRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts, - RedisClientManagerConfig config) - : this(readWriteHosts, readOnlyHosts, config, RedisNativeClient.DefaultDb, null, null) - { - } - - public PooledRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts, - int initalDb) - : this(readWriteHosts, readOnlyHosts, null, initalDb, null, null) - { - } - - public PooledRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts, - RedisClientManagerConfig config, - int initalDb, - int? poolSizeMultiplier, - int? poolTimeOutSeconds) - { - this.Db = config != null - ? config.DefaultDb.GetValueOrDefault(initalDb) - : initalDb; - - ReadWriteHosts = readWriteHosts.ToRedisEndPoints(); - ReadOnlyHosts = readOnlyHosts.ToRedisEndPoints(); - - this.RedisClientFactory = Redis.RedisClientFactory.Instance; - - this.PoolSizeMultiplier = poolSizeMultiplier ?? 10; - - this.Config = config ?? new RedisClientManagerConfig { - MaxWritePoolSize = ReadWriteHosts.Count * PoolSizeMultiplier, - MaxReadPoolSize = ReadOnlyHosts.Count * PoolSizeMultiplier, - }; - - // if timeout provided, convert into milliseconds - this.PoolTimeOut = poolTimeOutSeconds != null - ? 
poolTimeOutSeconds * 1000 - : null; - - - if (this.Config.AutoStart) - { - this.OnStart(); - } - } - - protected virtual void OnStart() - { - this.Start(); - } - - /// - /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - /// - /// - public IRedisClient GetClient() - { - lock (writeClients) - { - AssertValidReadWritePool(); - - RedisClient inActiveClient; - while ((inActiveClient = GetInActiveWriteClient()) == null) - { - if (PoolTimeOut.HasValue) - { - // wait for a connection, cry out if made to wait too long - if (!Monitor.Wait(writeClients, PoolTimeOut.Value)) - throw new TimeoutException(PoolTimeoutError); - } - else - Monitor.Wait(writeClients); - } - - WritePoolIndex++; - inActiveClient.Active = true; - - if (this.ConnectTimeout != null) + public PooledRedisClientManager(IEnumerable readWriteHosts, IEnumerable readOnlyHosts) + : this(readWriteHosts, readOnlyHosts, null) + { + } + + /// + /// Hosts can be an IP Address or Hostname in the format: host[:port] + /// e.g. 127.0.0.1:6379 + /// default is: localhost:6379 + /// + /// The write hosts. + /// The read hosts. + /// The config. + public PooledRedisClientManager( + IEnumerable readWriteHosts, + IEnumerable readOnlyHosts, + RedisClientManagerConfig config) + : this(readWriteHosts, readOnlyHosts, config, null, null, null) + { + } + + public PooledRedisClientManager( + IEnumerable readWriteHosts, + IEnumerable readOnlyHosts, + long initialDb) + : this(readWriteHosts, readOnlyHosts, null, initialDb, null, null) + { + } + + public PooledRedisClientManager( + IEnumerable readWriteHosts, + IEnumerable readOnlyHosts, + RedisClientManagerConfig config, + long? initialDb, + int? poolSizeMultiplier, + int? poolTimeOutSeconds) + { + this.Db = config != null + ? config.DefaultDb ?? initialDb + : initialDb; + + var masters = (readWriteHosts ?? TypeConstants.EmptyStringArray).ToArray(); + var replicas = (readOnlyHosts ?? TypeConstants.EmptyStringArray).ToArray(); + + RedisResolver = new RedisResolver(masters, replicas); + + this.PoolSizeMultiplier = poolSizeMultiplier ?? RedisConfig.DefaultPoolSizeMultiplier; + + this.Config = config ?? new RedisClientManagerConfig + { + MaxWritePoolSize = RedisConfig.DefaultMaxPoolSize ?? masters.Length * PoolSizeMultiplier, + MaxReadPoolSize = RedisConfig.DefaultMaxPoolSize ?? replicas.Length * PoolSizeMultiplier, + }; + + this.OnFailover = new List>(); + + // if timeout provided, convert into milliseconds + this.PoolTimeout = poolTimeOutSeconds != null + ? 
poolTimeOutSeconds * 1000 + : 2000; //Default Timeout + + this.AssertAccessOnlyOnSameThread = RedisConfig.AssertAccessOnlyOnSameThread; + + JsConfig.InitStatics(); + + if (this.Config.AutoStart) + { + this.OnStart(); + } + } + + public void FailoverTo(params string[] readWriteHosts) + { + FailoverTo(readWriteHosts, readWriteHosts); + } + + public void FailoverTo(IEnumerable readWriteHosts, IEnumerable readOnlyHosts) + { + Interlocked.Increment(ref RedisState.TotalFailovers); + + var masters = readWriteHosts.ToList(); + var replicas = readOnlyHosts.ToList(); + + Log.Info($"FailoverTo: {string.Join(",", masters)} : {string.Join(",", replicas)} Total: {RedisState.TotalFailovers}"); + + lock (readClients) + { + for (var i = 0; i < readClients.Length; i++) { - inActiveClient.ConnectTimeout = this.ConnectTimeout.Value; + var redis = readClients[i]; + if (redis != null) + RedisState.DeactivateClient(redis); + + readClients[i] = null; } + RedisResolver.ResetSlaves(replicas); + } - if( this.SocketSendTimeout.HasValue ) - { - inActiveClient.SendTimeout = this.SocketSendTimeout.Value; - } - if( this.SocketReceiveTimeout.HasValue ) - { - inActiveClient.ReceiveTimeout = this.SocketReceiveTimeout.Value; - } - - inActiveClient.NamespacePrefix = NamespacePrefix; - - //Reset database to default if changed - if (inActiveClient.Db != Db) - { - inActiveClient.Db = Db; - } - - return inActiveClient; - } - } - - /// - /// Called within a lock - /// - /// - private RedisClient GetInActiveWriteClient() - { - var desiredIndex = WritePoolIndex % writeClients.Length; + lock (writeClients) + { + for (var i = 0; i < writeClients.Length; i++) + { + var redis = writeClients[i]; + if (redis != null) + RedisState.DeactivateClient(redis); + + writeClients[i] = null; + } + RedisResolver.ResetMasters(masters); + } + + if (this.OnFailover != null) + { + foreach (var callback in OnFailover) + { + try + { + callback(this); + } + catch (Exception ex) + { + Log.Error("Error firing OnFailover callback(): ", ex); + } + } + } + } + + protected virtual void OnStart() + { + this.Start(); + } + + /// + /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts + /// + /// + public IRedisClient GetClient() => GetClientBlocking(); + + private RedisClient GetClientBlocking() + { + try + { + var poolTimedOut = false; + var inactivePoolIndex = -1; + lock (writeClients) + { + AssertValidReadWritePool(); + + RedisClient inActiveClient; + while ((inactivePoolIndex = GetInActiveWriteClient(out inActiveClient)) == -1) + { + if (PoolTimeout.HasValue) + { + // wait for a connection, cry out if made to wait too long + if (!Monitor.Wait(writeClients, PoolTimeout.Value)) + { + poolTimedOut = true; + break; + } + } + else + { + Monitor.Wait(writeClients, RecheckPoolAfterMs); + } + } + + //inActiveClient != null only for Valid InActive Clients + if (inActiveClient != null) + { + WritePoolIndex++; + inActiveClient.Activate(); + + InitClient(inActiveClient); + + return (!AssertAccessOnlyOnSameThread) + ? 
inActiveClient + : inActiveClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); + } + } + + if (poolTimedOut) + throw new TimeoutException(PoolTimeoutError); + + //Reaches here when there's no Valid InActive Clients + try + { + //inactivePoolIndex = index of reservedSlot || index of invalid client + var existingClient = writeClients[inactivePoolIndex]; + if (existingClient != null && existingClient != reservedSlot && existingClient.HadExceptions) + { + RedisState.DeactivateClient(existingClient); + } + + var newClient = InitNewClient(RedisResolver.CreateMasterClient(inactivePoolIndex)); + + //Put all blocking I/O or potential Exceptions before lock + lock (writeClients) + { + //If existingClient at inactivePoolIndex changed (failover) return new client outside of pool + if (writeClients[inactivePoolIndex] != existingClient) + { + if (Log.IsDebugEnabled) + Log.Debug("writeClients[inactivePoolIndex] != existingClient: {0}".Fmt(writeClients[inactivePoolIndex])); + + return newClient; //return client outside of pool + } + + WritePoolIndex++; + writeClients[inactivePoolIndex] = newClient; + + return (!AssertAccessOnlyOnSameThread) + ? newClient + : newClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); + } + } + catch + { + //Revert free-slot for any I/O exceptions that can throw (before lock) + lock (writeClients) + { + writeClients[inactivePoolIndex] = null; //free slot + } + throw; + } + } + finally + { + RedisState.DisposeExpiredClients(); + } + } + + class ReservedClient : RedisClient + { + public ReservedClient() + { + this.DeactivatedAt = DateTime.UtcNow; + } + + public override void Dispose() {} + } + + static readonly ReservedClient reservedSlot = new ReservedClient(); + + /// + /// Called within a lock + /// + /// + private int GetInActiveWriteClient(out RedisClient inactiveClient) + { //this will loop through all hosts in readClients once even though there are 2 for loops - //both loops are used to try to get the prefered host according to the round robin algorithm - for (int x = 0; x < ReadWriteHosts.Count; x++) + //both loops are used to try to get the preferred host according to the round robin algorithm + var readWriteTotal = RedisResolver.ReadWriteHostsCount; + var desiredIndex = WritePoolIndex % writeClients.Length; + for (int x = 0; x < readWriteTotal; x++) { - var nextHostIndex = (desiredIndex + x) % ReadWriteHosts.Count; - var nextHost = ReadWriteHosts[nextHostIndex]; - for (var i = nextHostIndex; i < writeClients.Length; i += ReadWriteHosts.Count) - { + var nextHostIndex = (desiredIndex + x) % readWriteTotal; + for (var i = nextHostIndex; i < writeClients.Length; i += readWriteTotal) + { if (writeClients[i] != null && !writeClients[i].Active && !writeClients[i].HadExceptions) - return writeClients[i]; - else if (writeClients[i] == null || writeClients[i].HadExceptions) { - if (writeClients[i] != null) - writeClients[i].DisposeConnection(); - var client = RedisClientFactory.CreateRedisClient(nextHost.Host, nextHost.Port); + inactiveClient = writeClients[i]; + return i; + } - if (nextHost.RequiresAuth) - client.Password = nextHost.Password; + if (writeClients[i] == null) + { + writeClients[i] = reservedSlot; + inactiveClient = null; + return i; + } - client.Id = RedisClientCounter++; - client.ClientManager = this; - client.NamespacePrefix = NamespacePrefix; - client.ConnectionFilter = ConnectionFilter; + if (writeClients[i] != reservedSlot && writeClients[i].HadExceptions) + { + inactiveClient = null; 
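// Sketch of the slot probe order GetInActiveWriteClient/GetInActiveReadClient use:
// starting at the round-robin "desired" slot, try the preferred host's slots first,
// then each other host's, stepping by the host count (slots are striped per host).
static IEnumerable<int> ProbeOrder(int poolSize, int hostCount, int poolIndex)
{
    var desiredIndex = poolIndex % poolSize;
    for (var x = 0; x < hostCount; x++)
    {
        var nextHostIndex = (desiredIndex + x) % hostCount;
        for (var i = nextHostIndex; i < poolSize; i += hostCount)
            yield return i;   // free client -> reuse; null -> reserve slot; faulted -> replace
    }
}
// e.g. ProbeOrder(poolSize: 6, hostCount: 2, poolIndex: 1) probes slots 1,3,5,0,2,4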
+ return i; + } + } + } + inactiveClient = null; + return -1; + } + + /// + /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. + /// + /// + public virtual IRedisClient GetReadOnlyClient() => GetReadOnlyClientBlocking(); - writeClients[i] = client; + private RedisClient GetReadOnlyClientBlocking() + { + try + { + var poolTimedOut = false; + var inactivePoolIndex = -1; + lock (readClients) + { + AssertValidReadOnlyPool(); - return client; + RedisClient inActiveClient; + while ((inactivePoolIndex = GetInActiveReadClient(out inActiveClient)) == -1) + { + if (PoolTimeout.HasValue) + { + // wait for a connection, break out if made to wait too long + if (!Monitor.Wait(readClients, PoolTimeout.Value)) + { + poolTimedOut = true; + break; + } + } + else + { + Monitor.Wait(readClients, RecheckPoolAfterMs); + } + } + + //inActiveClient != null only for Valid InActive Clients + if (inActiveClient != null) + { + ReadPoolIndex++; + inActiveClient.Activate(); + + InitClient(inActiveClient); + + return inActiveClient; } } - } - return null; - } - - /// - /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - /// - /// - public virtual IRedisClient GetReadOnlyClient() - { - lock (readClients) - { - AssertValidReadOnlyPool(); - - RedisClient inActiveClient; - while ((inActiveClient = GetInActiveReadClient()) == null) - { - Monitor.Wait(readClients); - } - - ReadPoolIndex++; - inActiveClient.Active = true; - - if (this.ConnectTimeout != null) + + if (poolTimedOut) + throw new TimeoutException(PoolTimeoutError); + + //Reaches here when there's no Valid InActive Clients + try { - inActiveClient.ConnectTimeout = this.ConnectTimeout.Value; + //inactivePoolIndex = index of reservedSlot || index of invalid client + var existingClient = readClients[inactivePoolIndex]; + if (existingClient != null && existingClient != reservedSlot && existingClient.HadExceptions) + { + RedisState.DeactivateClient(existingClient); + } + + var newClient = InitNewClient(RedisResolver.CreateSlaveClient(inactivePoolIndex)); + + //Put all blocking I/O or potential Exceptions before lock + lock (readClients) + { + //If existingClient at inactivePoolIndex changed (failover) return new client outside of pool + if (readClients[inactivePoolIndex] != existingClient) + { + if (Log.IsDebugEnabled) + Log.Debug("readClients[inactivePoolIndex] != existingClient: {0}".Fmt(readClients[inactivePoolIndex])); + + Interlocked.Increment(ref RedisState.TotalClientsCreatedOutsidePool); + + //Don't handle callbacks for new client outside pool + newClient.ClientManager = null; + return newClient; //return client outside of pool + } + + ReadPoolIndex++; + readClients[inactivePoolIndex] = newClient; + return newClient; + } + } + catch + { + //Revert free-slot for any I/O exceptions that can throw + lock (readClients) + { + readClients[inactivePoolIndex] = null; //free slot + } + throw; } + } + finally + { + RedisState.DisposeExpiredClients(); + } + } - if( this.SocketSendTimeout.HasValue ) - { - inActiveClient.SendTimeout = this.SocketSendTimeout.Value; - } - if( this.SocketReceiveTimeout.HasValue ) - { - inActiveClient.ReceiveTimeout = this.SocketReceiveTimeout.Value; - } - - inActiveClient.NamespacePrefix = NamespacePrefix; - - //Reset database to default if changed - if (inActiveClient.Db != Db) - { - inActiveClient.Db = Db; - } - - return inActiveClient; - } - } - - /// - /// Called within a lock - /// - /// - private RedisClient GetInActiveReadClient() - { + /// + /// Called within a lock + /// + /// + private int 
GetInActiveReadClient(out RedisClient inactiveClient) + { var desiredIndex = ReadPoolIndex % readClients.Length; //this will loop through all hosts in readClients once even though there are 2 for loops - //both loops are used to try to get the prefered host according to the round robin algorithm - for (int x = 0; x < ReadOnlyHosts.Count; x++) + //both loops are used to try to get the preferred host according to the round robin algorithm + var readOnlyTotal = RedisResolver.ReadOnlyHostsCount; + for (int x = 0; x < readOnlyTotal; x++) { - var nextHostIndex = (desiredIndex + x) % ReadOnlyHosts.Count; - var nextHost = ReadOnlyHosts[nextHostIndex]; - for (var i = nextHostIndex; i < readClients.Length; i += ReadOnlyHosts.Count) + var nextHostIndex = (desiredIndex + x) % readOnlyTotal; + for (var i = nextHostIndex; i < readClients.Length; i += readOnlyTotal) { if (readClients[i] != null && !readClients[i].Active && !readClients[i].HadExceptions) - return readClients[i]; - else if (readClients[i] == null || readClients[i].HadExceptions) { - if (readClients[i] != null) - readClients[i].DisposeConnection(); - var client = RedisClientFactory.CreateRedisClient(nextHost.Host, nextHost.Port); + inactiveClient = readClients[i]; + return i; + } + + if (readClients[i] == null) + { + readClients[i] = reservedSlot; + inactiveClient = null; + return i; + } + + if (readClients[i] != reservedSlot && readClients[i].HadExceptions) + { + inactiveClient = null; + return i; + } + } + } + inactiveClient = null; + return -1; + } - if (nextHost.RequiresAuth) - client.Password = nextHost.Password; + private RedisClient InitNewClient(RedisClient client) + { + client.Id = Interlocked.Increment(ref RedisClientCounter); + client.Activate(newClient:true); + client.ClientManager = this; + client.ConnectionFilter = ConnectionFilter; + if (NamespacePrefix != null) + client.NamespacePrefix = NamespacePrefix; + + return InitClient(client); + } + + private RedisClient InitClient(RedisClient client) + { + if (this.ConnectTimeout != null) + client.ConnectTimeout = this.ConnectTimeout.Value; + if (this.SocketSendTimeout.HasValue) + client.SendTimeout = this.SocketSendTimeout.Value; + if (this.SocketReceiveTimeout.HasValue) + client.ReceiveTimeout = this.SocketReceiveTimeout.Value; + if (this.IdleTimeOutSecs.HasValue) + client.IdleTimeOutSecs = this.IdleTimeOutSecs.Value; + if (this.NamespacePrefix != null) + client.NamespacePrefix = NamespacePrefix; + if (Db != null && client.Db != Db) //Reset database to default if changed + client.ChangeDb(Db.Value); + return client; + } + + partial void PulseAllReadAsync(); + private void PulseAllRead() + { + PulseAllReadAsync(); + Monitor.PulseAll(readClients); + } + + partial void PulseAllWriteAsync(); + private void PulseAllWrite() + { + PulseAllWriteAsync(); + Monitor.PulseAll(writeClients); + } - client.ClientManager = this; - client.ConnectionFilter = ConnectionFilter; + public void DisposeClient(RedisNativeClient client) + { + lock (readClients) + { + for (var i = 0; i < readClients.Length; i++) + { + var readClient = readClients[i]; + if (client != readClient) continue; + if (client.IsDisposed) + { + readClients[i] = null; + } + else + { + client.TrackThread = null; + client.Deactivate(); + } - readClients[i] = client; + PulseAllRead(); + return; + } + } - return client; + lock (writeClients) + { + for (var i = 0; i < writeClients.Length; i++) + { + var writeClient = writeClients[i]; + if (client != writeClient) continue; + if (client.IsDisposed) + { + writeClients[i] = null; + } + else + 
{ + client.TrackThread = null; + client.Deactivate(); } + + PulseAllWrite(); + return; + } + } + + //Client not found in any pool, pulse both pools. + lock (readClients) + { + PulseAllRead(); + } + + lock (writeClients) + { + PulseAllWrite(); + } + } + + /// + /// Disposes the read only client. + /// + /// The client. + public void DisposeReadOnlyClient(RedisNativeClient client) + { + lock (readClients) + { + client.Deactivate(); + PulseAllRead(); + } + } + + /// + /// Disposes the write client. + /// + /// The client. + public void DisposeWriteClient(RedisNativeClient client) + { + lock (writeClients) + { + client.Deactivate(); + PulseAllWrite(); + } + } + + public void Start() + { + if (writeClients.Length > 0 || readClients.Length > 0) + throw new InvalidOperationException("Pool has already been started"); + + writeClients = new RedisClient[Config.MaxWritePoolSize]; + WritePoolIndex = 0; + + readClients = new RedisClient[Config.MaxReadPoolSize]; + ReadPoolIndex = 0; + } + + public Dictionary GetStats() + { + var writeClientsPoolSize = writeClients.Length; + var writeClientsCreated = 0; + var writeClientsWithExceptions = 0; + var writeClientsInUse = 0; + var writeClientsConnected = 0; + + foreach (var client in writeClients) + { + if (client == null) + { + writeClientsCreated++; + continue; + } + + if (client.HadExceptions) + writeClientsWithExceptions++; + if (client.Active) + writeClientsInUse++; + if (client.IsSocketConnected()) + writeClientsConnected++; + } + + var readClientsPoolSize = readClients.Length; + var readClientsCreated = 0; + var readClientsWithExceptions = 0; + var readClientsInUse = 0; + var readClientsConnected = 0; + + foreach (var client in readClients) + { + if (client == null) + { + readClientsCreated++; + continue; } - } - return null; - } - - public void DisposeClient(RedisNativeClient client) - { - lock (readClients) - { - for (var i = 0; i < readClients.Length; i++) - { - var readClient = readClients[i]; - if (client != readClient) continue; - client.Active = false; - Monitor.PulseAll(readClients); - return; - } - } - - lock (writeClients) - { - for (var i = 0; i < writeClients.Length; i++) - { - var writeClient = writeClients[i]; - if (client != writeClient) continue; - client.Active = false; - Monitor.PulseAll(writeClients); - return; - } - } - - //Debug.WriteLine("Couldn't find {0} client with Id: {1}, readclients: {2}, writeclients: {3}", - // client.IsDisposed ? "Disposed" : "Undisposed", - // client.Id, - // string.Join(", ", readClients.ToList().ConvertAll(x => x != null ? x.Id.ToString() : "").ToArray()), - // string.Join(", ", writeClients.ToList().ConvertAll(x => x != null ? x.Id.ToString() : "").ToArray())); - - if (client.IsDisposed) return; - - throw new NotSupportedException("Cannot add unknown client back to the pool"); - } - - /// - /// Disposes the read only client. - /// - /// The client. - public void DisposeReadOnlyClient( RedisNativeClient client ) - { - lock( readClients ) - { - client.Active = false; - Monitor.PulseAll( readClients ); - } - } - - /// - /// Disposes the write client. - /// - /// The client. 
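// Example (sketch): the contract the Dispose* methods above implement. Disposing a
// pooled client returns the connection to the pool (Deactivate + pulse) rather than
// closing it, so a waiting GetClient/GetClientAsync call can reuse it.
public static void PingOnce(PooledRedisClientManager manager)
{
    using (var redis = manager.GetClient())   // borrow a pooled connection
    {
        redis.Ping();
    }                                         // Dispose -> DisposeClient -> Deactivate + PulseAllWrite
}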
- public void DisposeWriteClient( RedisNativeClient client ) - { - lock( writeClients ) - { - client.Active = false; - Monitor.PulseAll( writeClients ); - } - } - - public void Start() - { - if (writeClients.Length > 0 || readClients.Length > 0) - throw new InvalidOperationException("Pool has already been started"); - - writeClients = new RedisClient[Config.MaxWritePoolSize]; - WritePoolIndex = 0; - - readClients = new RedisClient[Config.MaxReadPoolSize]; - ReadPoolIndex = 0; - } - - private void AssertValidReadWritePool() - { - if (writeClients.Length < 1) - throw new InvalidOperationException("Need a minimum read-write pool size of 1, then call Start()"); - } - - private void AssertValidReadOnlyPool() - { - if (readClients.Length < 1) - throw new InvalidOperationException("Need a minimum read pool size of 1, then call Start()"); - } - - ~PooledRedisClientManager() - { - Dispose(false); - } + + if (client.HadExceptions) + readClientsWithExceptions++; + if (client.Active) + readClientsInUse++; + if (client.IsSocketConnected()) + readClientsConnected++; + } + + var ret = new Dictionary + { + {"VersionString", "" + Env.VersionString}, + + {"writeClientsPoolSize", "" + writeClientsPoolSize}, + {"writeClientsCreated", "" + writeClientsCreated}, + {"writeClientsWithExceptions", "" + writeClientsWithExceptions}, + {"writeClientsInUse", "" + writeClientsInUse}, + {"writeClientsConnected", "" + writeClientsConnected}, + + {"readClientsPoolSize", "" + readClientsPoolSize}, + {"readClientsCreated", "" + readClientsCreated}, + {"readClientsWithExceptions", "" + readClientsWithExceptions}, + {"readClientsInUse", "" + readClientsInUse}, + {"readClientsConnected", "" + readClientsConnected}, + + {"RedisResolver.ReadOnlyHostsCount", "" + RedisResolver.ReadOnlyHostsCount}, + {"RedisResolver.ReadWriteHostsCount", "" + RedisResolver.ReadWriteHostsCount}, + }; + + return ret; + } + + private void AssertValidReadWritePool() + { + if (writeClients.Length < 1) + throw new InvalidOperationException("Need a minimum read-write pool size of 1, then call Start()"); + } + + private void AssertValidReadOnlyPool() + { + if (readClients.Length < 1) + throw new InvalidOperationException("Need a minimum read pool size of 1, then call Start()"); + } + + public int[] GetClientPoolActiveStates() + { + var activeStates = new int[writeClients.Length]; + lock (writeClients) + { + for (int i = 0; i < writeClients.Length; i++) + { + activeStates[i] = writeClients[i] == null + ? -1 + : writeClients[i].Active ? 1 : 0; + } + } + return activeStates; + } + + public int[] GetReadOnlyClientPoolActiveStates() + { + var activeStates = new int[readClients.Length]; + lock (readClients) + { + for (int i = 0; i < readClients.Length; i++) + { + activeStates[i] = readClients[i] == null + ? -1 + : readClients[i].Active ? 
1 : 0; + } + } + return activeStates; + } + + ~PooledRedisClientManager() + { + Dispose(false); + } public void Dispose() { @@ -421,48 +757,103 @@ public void Dispose() GC.SuppressFinalize(this); } - protected virtual void Dispose(bool disposing) - { + protected virtual void Dispose(bool disposing) + { if (Interlocked.Increment(ref disposeAttempts) > 1) return; - if (disposing) - { - // get rid of managed resources - } - - try - { - // get rid of unmanaged resources - for (var i = 0; i < writeClients.Length; i++) - { - Dispose(writeClients[i]); - } - for (var i = 0; i < readClients.Length; i++) - { - Dispose(readClients[i]); - } - } - catch (Exception ex) - { - Log.Error("Error when trying to dispose of PooledRedisClientManager", ex); - } - } - - private int disposeAttempts = 0; - - protected void Dispose(RedisClient redisClient) - { - if (redisClient == null) return; - try - { - redisClient.DisposeConnection(); - } - catch (Exception ex) - { - Log.Error(string.Format( - "Error when trying to dispose of RedisClient to host {0}:{1}", - redisClient.Host, redisClient.Port), ex); - } - } - } + if (disposing) + { + // get rid of managed resources + } + + try + { + // get rid of unmanaged resources + for (var i = 0; i < writeClients.Length; i++) + { + Dispose(writeClients[i]); + } + for (var i = 0; i < readClients.Length; i++) + { + Dispose(readClients[i]); + } + } + catch (Exception ex) + { + Log.Error("Error when trying to dispose of PooledRedisClientManager", ex); + } + + RedisState.DisposeAllDeactivatedClients(); + } + + private int disposeAttempts = 0; + + protected void Dispose(RedisClient redisClient) + { + if (redisClient == null) return; + try + { + redisClient.DisposeConnection(); + } + catch (Exception ex) + { + Log.Error($"Error when trying to dispose of RedisClient to host {redisClient.Host}:{redisClient.Port}", ex); + } + } + + public ICacheClient GetCacheClient() + { + return new RedisClientManagerCacheClient(this); + } + + public ICacheClient GetReadOnlyCacheClient() + { + return new RedisClientManagerCacheClient(this) { ReadOnly = true }; + } + } + + public partial class PooledRedisClientManager : IRedisClientCacheManager + { + /// + /// Manage a client acquired from the PooledRedisClientManager + /// Dispose method will release the client back to the pool. 
+ /// + public class DisposablePooledClient : IDisposable where T : RedisNativeClient + { + private T client; + private readonly PooledRedisClientManager clientManager; + + /// + /// wrap the acquired client + /// + /// + public DisposablePooledClient(PooledRedisClientManager clientManager) + { + this.clientManager = clientManager; + if (clientManager != null) + client = (T)clientManager.GetClient(); + } + + /// + /// access the wrapped client + /// + public T Client => client; + + /// + /// release the wrapped client back to the pool + /// + public void Dispose() + { + if (client != null) + clientManager.DisposeClient(client); + client = null; + } + } + + public DisposablePooledClient GetDisposableClient() where T : RedisNativeClient + { + return new DisposablePooledClient(this); + } + } + } diff --git a/src/ServiceStack.Redis/Properties/AssemblyInfo.cs b/src/ServiceStack.Redis/Properties/AssemblyInfo.cs index c65f0e11..4ce9daba 100644 --- a/src/ServiceStack.Redis/Properties/AssemblyInfo.cs +++ b/src/ServiceStack.Redis/Properties/AssemblyInfo.cs @@ -1,38 +1,5 @@ -using System.Reflection; -using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -// General Information about an assembly is controlled through the following -// set of attributes. Change these attribute values to modify the information -// associated with an assembly. -[assembly: AssemblyTitle("ServiceStack.Redis")] -[assembly: AssemblyDescription("")] -[assembly: AssemblyConfiguration("")] -[assembly: AssemblyCompany("")] -[assembly: AssemblyProduct("ServiceStack.Redis")] -[assembly: AssemblyCopyright("Copyright © ServiceStack 2013")] -[assembly: AssemblyTrademark("")] -[assembly: AssemblyCulture("")] - -// Setting ComVisible to false makes the types in this assembly not visible -// to COM components. If you need to access a type in this assembly from -// COM, set the ComVisible attribute to true on that type. [assembly: ComVisible(false)] - -// The following GUID is for the ID of the typelib if this project is exposed to COM [assembly: Guid("70a33fa7-9f81-418d-bb25-6a4be6648ae4")] - -// Version information for an assembly consists of the following four values: -// -// Major Version -// Minor Version -// Build Number -// Revision -// -// You can specify all the values or you can default the Build and Revision Numbers -// by using the '*' as shown below: -// [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("3.9.43.0")] -//[assembly: AssemblyFileVersion("1.0.0.0")] - -[assembly: InternalsVisibleTo("ServiceStack.Redis.Tests")] +[assembly: System.Reflection.AssemblyVersion("6.0.0.0")] \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisCacheClientFactory.cs b/src/ServiceStack.Redis/RedisCacheClientFactory.cs deleted file mode 100644 index e4f34ce6..00000000 --- a/src/ServiceStack.Redis/RedisCacheClientFactory.cs +++ /dev/null @@ -1,30 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
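// Example (sketch): using the DisposablePooledClient wrapper defined above when a
// strongly-typed RedisClient is needed; disposing releases it back to the pool.
public static void SetWithDisposableClient(PooledRedisClientManager manager)
{
    using (var wrapper = manager.GetDisposableClient<RedisClient>())
    {
        wrapper.Client.SetValue("key", "value");
    }   // Dispose() -> clientManager.DisposeClient(client)
}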
-// - -using System.Net; - -namespace ServiceStack.Redis -{ - /// - /// Provide the factory implementation for creating a RedisCacheClient that - /// can be mocked and used by different 'Redis Client Managers' - /// - public class RedisCacheClientFactory : IRedisClientFactory - { - public static RedisCacheClientFactory Instance = new RedisCacheClientFactory(); - - public RedisClient CreateRedisClient(string host, int port) - { - return new RedisClient(host, port); - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient.Async.cs b/src/ServiceStack.Redis/RedisClient.Async.cs new file mode 100644 index 00000000..6648a224 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient.Async.cs @@ -0,0 +1,1558 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis/ +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Caching; +using ServiceStack.Data; +using ServiceStack.Model; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Internal; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Text; +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisClient : IRedisClientAsync, IRemoveByPatternAsync, ICacheClientAsync, IAsyncDisposable + { + /// + /// Access this instance for async usage + /// + public IRedisClientAsync AsAsync() => this; + + // the typed client implements this for us + IRedisTypedClientAsync IRedisClientAsync.As() => (IRedisTypedClientAsync)As(); + + // convenience since we're not saturating the public API; this makes it easy to call + // the explicit interface implementations; the JIT should make this a direct call + private IRedisNativeClientAsync NativeAsync => this; + + IHasNamed IRedisClientAsync.Lists => Lists as IHasNamed ?? throw new NotSupportedException($"The provided Lists ({Lists?.GetType().FullName}) does not support IRedisListAsync"); + IHasNamed IRedisClientAsync.Sets => Sets as IHasNamed ?? throw new NotSupportedException($"The provided Sets ({Sets?.GetType().FullName})does not support IRedisSetAsync"); + IHasNamed IRedisClientAsync.SortedSets => SortedSets as IHasNamed ?? throw new NotSupportedException($"The provided SortedSets ({SortedSets?.GetType().FullName})does not support IRedisSortedSetAsync"); + IHasNamed IRedisClientAsync.Hashes => Hashes as IHasNamed ?? throw new NotSupportedException($"The provided Hashes ({Hashes?.GetType().FullName})does not support IRedisHashAsync"); + + internal ValueTask RegisterTypeIdAsync(T value, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + var id = value.GetId().ToString(); + + return RegisterTypeIdAsync(typeIdsSetKey, id, token); + } + internal ValueTask RegisterTypeIdAsync(string typeIdsSetKey, string id, CancellationToken token) + { + if (this.Pipeline != null) + { + var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); + registeredTypeIdsWithinPipeline.Add(id); + return default; + } + else + { + return AsAsync().AddItemToSetAsync(typeIdsSetKey, id, token); + } + } + + // Called just after original Pipeline is closed. 
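// Example (sketch): the AsAsync() bridge above lets code holding a sync RedisClient
// reach the async API over the same underlying connection; key/value are illustrative.
public static async Task<string> RoundTripAsync(RedisClient client, CancellationToken token)
{
    IRedisClientAsync redis = client.AsAsync();
    await redis.SetValueAsync("greeting", "hello", token);   // SET greeting hello
    return await redis.GetValueAsync("greeting", token);     // GET greeting
}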
+ internal async ValueTask AddTypeIdsRegisteredDuringPipelineAsync(CancellationToken token) + { + foreach (var entry in registeredTypeIdsWithinPipelineMap) + { + await AsAsync().AddRangeToSetAsync(entry.Key, entry.Value.ToList(), token).ConfigureAwait(false); + } + registeredTypeIdsWithinPipelineMap = new Dictionary>(); + } + + + ValueTask IRedisClientAsync.GetServerTimeAsync(CancellationToken token) + => NativeAsync.TimeAsync(token).Await(parts => ParseTimeResult(parts)); + + IRedisPipelineAsync IRedisClientAsync.CreatePipeline() + => new RedisAllPurposePipeline(this); + + ValueTask IRedisClientAsync.CreateTransactionAsync(CancellationToken token) + { + AssertServerVersionNumber(); // pre-fetch call to INFO before transaction if needed + return new RedisTransaction(this, true).AsValueTaskResult(); // note that the MULTI here will be held and flushed async + } + + ValueTask IRedisClientAsync.RemoveEntryAsync(string[] keys, CancellationToken token) + => keys.Length == 0 ? default : NativeAsync.DelAsync(keys, token).IsSuccessAsync(); + + private async ValueTask ExecAsync(Func action) + { + using (JsConfig.With(new Text.Config { ExcludeTypeInfo = false })) + { + await action(this).ConfigureAwait(false); + } + } + + private async ValueTask ExecAsync(Func> action) + { + using (JsConfig.With(new Text.Config { ExcludeTypeInfo = false })) + { + var ret = await action(this).ConfigureAwait(false); + return ret; + } + } + + ValueTask IRedisClientAsync.SetValueAsync(string key, string value, CancellationToken token) + { + var bytesValue = value?.ToUtf8Bytes(); + return NativeAsync.SetAsync(key, bytesValue, token: token); + } + + ValueTask IRedisClientAsync.GetValueAsync(string key, CancellationToken token) + => NativeAsync.GetAsync(key, token).FromUtf8BytesAsync(); + + Task ICacheClientAsync.GetAsync(string key, CancellationToken token) + { + return ExecAsync(async r => { + if (typeof(T) == typeof(byte[])) + { + var ret = await ((IRedisNativeClientAsync) r).GetAsync(key, token).ConfigureAwait(false); + return (T) (object) ret; + } + else + { + var val = await r.GetValueAsync(key, token).ConfigureAwait(false); + var ret = JsonSerializer.DeserializeFromString(val); + return ret; + } + }).AsTask(); + } + + async ValueTask> IRedisClientAsync.SearchKeysAsync(string pattern, CancellationToken token) + { + var list = new List(); + await foreach (var value in ((IRedisClientAsync)this).ScanAllKeysAsync(pattern, token: token).WithCancellation(token).ConfigureAwait(false)) + { + list.Add(value); + } + return list; + } + + async IAsyncEnumerable IRedisClientAsync.ScanAllKeysAsync(string pattern, int pageSize, [EnumeratorCancellation] CancellationToken token) + { + ScanResult ret = default; + while (true) + { + ret = await (pattern != null // note ConfigureAwait is handled below + ? NativeAsync.ScanAsync(ret?.Cursor ?? 0, pageSize, match: pattern, token: token) + : NativeAsync.ScanAsync(ret?.Cursor ?? 
0, pageSize, token: token) + ).ConfigureAwait(false); + + foreach (var key in ret.Results) + { + yield return key.FromUtf8Bytes(); + } + + if (ret.Cursor == 0) break; + } + } + + ValueTask IRedisClientAsync.GetEntryTypeAsync(string key, CancellationToken token) + => NativeAsync.TypeAsync(key, token).Await((val, state) => state.ParseEntryType(val), this); + + ValueTask IRedisClientAsync.AddItemToSetAsync(string setId, string item, CancellationToken token) + => NativeAsync.SAddAsync(setId, item.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.AddItemToListAsync(string listId, string value, CancellationToken token) + => NativeAsync.RPushAsync(listId, value.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.AddItemToSortedSetAsync(string setId, string value, CancellationToken token) + => ((IRedisClientAsync)this).AddItemToSortedSetAsync(setId, value, GetLexicalScore(value), token); + + ValueTask IRedisClientAsync.AddItemToSortedSetAsync(string setId, string value, double score, CancellationToken token) + => NativeAsync.ZAddAsync(setId, score, value.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.SetEntryInHashAsync(string hashId, string key, string value, CancellationToken token) + => NativeAsync.HSetAsync(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.SetAllAsync(IDictionary map, CancellationToken token) + => GetSetAllBytes(map, out var keyBytes, out var valBytes) ? NativeAsync.MSetAsync(keyBytes, valBytes, token) : default; + + ValueTask IRedisClientAsync.SetAllAsync(IEnumerable keys, IEnumerable values, CancellationToken token) + => GetSetAllBytes(keys, values, out var keyBytes, out var valBytes) ? NativeAsync.MSetAsync(keyBytes, valBytes, token) : default; + + Task ICacheClientAsync.SetAllAsync(IDictionary values, CancellationToken token) + { + if (values.Count != 0) + { + return ExecAsync(r => + { + // need to do this inside Exec for the JSON config bits + GetSetAllBytesTyped(values, out var keys, out var valBytes); + return ((IRedisNativeClientAsync)r).MSetAsync(keys, valBytes, token); + }).AsTask(); + } + else + { + return Task.CompletedTask; + } + } + + ValueTask IRedisClientAsync.RenameKeyAsync(string fromName, string toName, CancellationToken token) + => NativeAsync.RenameAsync(fromName, toName, token); + + ValueTask IRedisClientAsync.ContainsKeyAsync(string key, CancellationToken token) + => NativeAsync.ExistsAsync(key, token).IsSuccessAsync(); + + + ValueTask IRedisClientAsync.GetRandomKeyAsync(CancellationToken token) + => NativeAsync.RandomKeyAsync(token); + + ValueTask IRedisClientAsync.SelectAsync(long db, CancellationToken token) + => NativeAsync.SelectAsync(db, token); + + ValueTask IRedisClientAsync.ExpireEntryInAsync(string key, TimeSpan expireIn, CancellationToken token) + => UseMillisecondExpiration(expireIn) + ? NativeAsync.PExpireAsync(key, (long)expireIn.TotalMilliseconds, token) + : NativeAsync.ExpireAsync(key, (int)expireIn.TotalSeconds, token); + + ValueTask IRedisClientAsync.ExpireEntryAtAsync(string key, DateTime expireAt, CancellationToken token) + => AssertServerVersionNumber() >= 2600 + ? 
NativeAsync.PExpireAtAsync(key, ConvertToServerDate(expireAt).ToUnixTimeMs(), token) + : NativeAsync.ExpireAtAsync(key, ConvertToServerDate(expireAt).ToUnixTime(), token); + + Task ICacheClientAsync.GetTimeToLiveAsync(string key, CancellationToken token) + => NativeAsync.TtlAsync(key, token).Await(ParseTimeToLiveResult).AsTask(); + + ValueTask IRedisClientAsync.PingAsync(CancellationToken token) + => NativeAsync.PingAsync(token); + + ValueTask IRedisClientAsync.EchoAsync(string text, CancellationToken token) + => NativeAsync.EchoAsync(text, token); + + ValueTask IRedisClientAsync.ForegroundSaveAsync(CancellationToken token) + => NativeAsync.SaveAsync(token); + + ValueTask IRedisClientAsync.BackgroundSaveAsync(CancellationToken token) + => NativeAsync.BgSaveAsync(token); + + ValueTask IRedisClientAsync.ShutdownAsync(CancellationToken token) + => NativeAsync.ShutdownAsync(false, token); + + ValueTask IRedisClientAsync.ShutdownNoSaveAsync(CancellationToken token) + => NativeAsync.ShutdownAsync(true, token); + + ValueTask IRedisClientAsync.BackgroundRewriteAppendOnlyFileAsync(CancellationToken token) + => NativeAsync.BgRewriteAofAsync(token); + + ValueTask IRedisClientAsync.FlushDbAsync(CancellationToken token) + => NativeAsync.FlushDbAsync(token); + + ValueTask> IRedisClientAsync.GetValuesAsync(List keys, CancellationToken token) + { + if (keys == null) throw new ArgumentNullException(nameof(keys)); + if (keys.Count == 0) return new List().AsValueTaskResult(); + + return NativeAsync.MGetAsync(keys.ToArray(), token).Await(ParseGetValuesResult); + } + + ValueTask> IRedisClientAsync.GetValuesAsync(List keys, CancellationToken token) + { + if (keys == null) throw new ArgumentNullException(nameof(keys)); + if (keys.Count == 0) return new List().AsValueTaskResult(); + + return NativeAsync.MGetAsync(keys.ToArray(), token).Await(ParseGetValuesResult); + } + + ValueTask> IRedisClientAsync.GetValuesMapAsync(List keys, CancellationToken token) + { + if (keys == null) throw new ArgumentNullException(nameof(keys)); + if (keys.Count == 0) return new Dictionary().AsValueTaskResult(); + + var keysArray = keys.ToArray(); + return NativeAsync.MGetAsync(keysArray, token).Await((resultBytesArray, state) => ParseGetValuesMapResult(state, resultBytesArray), keysArray); + } + + ValueTask> IRedisClientAsync.GetValuesMapAsync(List keys, CancellationToken token) + { + if (keys == null) throw new ArgumentNullException(nameof(keys)); + if (keys.Count == 0) return new Dictionary().AsValueTaskResult(); + + var keysArray = keys.ToArray(); + return NativeAsync.MGetAsync(keysArray, token).Await((resultBytesArray, state) => ParseGetValuesMapResult(state, resultBytesArray), keysArray); + } + + ValueTask IRedisClientAsync.AcquireLockAsync(string key, TimeSpan? timeOut, CancellationToken token) + => RedisLock.CreateAsync(this, key, timeOut, token).Await(value => value); + + ValueTask IRedisClientAsync.SetValueAsync(string key, string value, TimeSpan expireIn, CancellationToken token) + { + var bytesValue = value?.ToUtf8Bytes(); + + if (AssertServerVersionNumber() >= 2610) + { + PickTime(expireIn, out var seconds, out var milliseconds); + return NativeAsync.SetAsync(key, bytesValue, expirySeconds: seconds, + expiryMilliseconds: milliseconds, token: token); + } + else + { + return NativeAsync.SetExAsync(key, (int)expireIn.TotalSeconds, bytesValue, token); + } + } + + static void PickTime(TimeSpan? 
value, out long expirySeconds, out long expiryMilliseconds) + { + expirySeconds = expiryMilliseconds = 0; + if (value.HasValue) + { + var expireIn = value.GetValueOrDefault(); + if (expireIn.Milliseconds > 0) + { + expiryMilliseconds = (long)expireIn.TotalMilliseconds; + } + else + { + expirySeconds = (long)expireIn.TotalSeconds; + } + } + } + ValueTask IRedisClientAsync.SetValueIfNotExistsAsync(string key, string value, TimeSpan? expireIn, CancellationToken token) + { + var bytesValue = value?.ToUtf8Bytes(); + PickTime(expireIn, out var seconds, out var milliseconds); + return NativeAsync.SetAsync(key, bytesValue, false, seconds, milliseconds, token); + } + + ValueTask IRedisClientAsync.SetValueIfExistsAsync(string key, string value, TimeSpan? expireIn, CancellationToken token) + { + var bytesValue = value?.ToUtf8Bytes(); + PickTime(expireIn, out var seconds, out var milliseconds); + return NativeAsync.SetAsync(key, bytesValue, true, seconds, milliseconds, token); + } + + ValueTask IRedisClientAsync.WatchAsync(string[] keys, CancellationToken token) + => NativeAsync.WatchAsync(keys, token); + + ValueTask IRedisClientAsync.UnWatchAsync(CancellationToken token) + => NativeAsync.UnWatchAsync(token); + + ValueTask IRedisClientAsync.AppendToValueAsync(string key, string value, CancellationToken token) + => NativeAsync.AppendAsync(key, value.ToUtf8Bytes(), token); + + async ValueTask IRedisClientAsync.StoreObjectAsync(object entity, CancellationToken token) + { + if (entity == null) throw new ArgumentNullException(nameof(entity)); + + var id = entity.GetObjectId(); + var entityType = entity.GetType(); + var urnKey = UrnKey(entityType, id); + var valueString = JsonSerializer.SerializeToString(entity); + + await ((IRedisClientAsync)this).SetValueAsync(urnKey, valueString, token).ConfigureAwait(false); + + await RegisterTypeIdAsync(GetTypeIdsSetKey(entityType), id.ToString(), token).ConfigureAwait(false); + + return entity; + } + + ValueTask IRedisClientAsync.PopItemFromSetAsync(string setId, CancellationToken token) + => NativeAsync.SPopAsync(setId, token).FromUtf8BytesAsync(); + + ValueTask> IRedisClientAsync.PopItemsFromSetAsync(string setId, int count, CancellationToken token) + => NativeAsync.SPopAsync(setId, count, token).ToStringListAsync(); + + ValueTask IRedisClientAsync.SlowlogResetAsync(CancellationToken token) + => NativeAsync.SlowlogResetAsync(token); + + ValueTask IRedisClientAsync.GetSlowlogAsync(int? 
numberOfRecords, CancellationToken token) + => NativeAsync.SlowlogGetAsync(numberOfRecords, token).Await(ParseSlowlog); + + + Task ICacheClientAsync.SetAsync(string key, T value, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), token: token)).AwaitAsTrueTask(); + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + ValueTask IRedisClientAsync.GetSortedSetCountAsync(string setId, CancellationToken token) + => NativeAsync.ZCardAsync(setId, token); + + ValueTask IRedisClientAsync.GetSortedSetCountAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetSortedSetCountAsync(setId, fromScore, toScore, token); + } + + ValueTask IRedisClientAsync.GetSortedSetCountAsync(string setId, double fromScore, double toScore, CancellationToken token) + => NativeAsync.ZCountAsync(setId, fromScore, toScore, token); + + ValueTask IRedisClientAsync.GetSortedSetCountAsync(string setId, long fromScore, long toScore, CancellationToken token) + => NativeAsync.ZCountAsync(setId, fromScore, toScore, token); + + ValueTask IRedisClientAsync.GetItemScoreInSortedSetAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZScoreAsync(setId, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.CustomAsync(object[] cmdWithArgs, CancellationToken token) + => RawCommandAsync(token, cmdWithArgs).Await(result => result.ToRedisText()); + + ValueTask IRedisClientAsync.SetValuesAsync(IDictionary map, CancellationToken token) + => ((IRedisClientAsync)this).SetAllAsync(map, token); + + Task ICacheClientAsync.SetAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + AssertNotInTransaction(); + return ExecAsync(async r => + { + await r.SetAsync(key, value, token).ConfigureAwait(false); + await r.ExpireEntryAtAsync(key, ConvertToServerDate(expiresAt), token).ConfigureAwait(false); + }).AwaitAsTrueTask(); + } + Task ICacheClientAsync.SetAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + if (AssertServerVersionNumber() >= 2600) + { + return ExecAsync(r => ((IRedisNativeClientAsync)r) + .SetAsync(key, ToBytes(value), 0, expiryMilliseconds: (long)expiresIn.TotalMilliseconds, token)).AwaitAsTrueTask(); + } + else + { + return ExecAsync(r => ((IRedisNativeClientAsync)r) + .SetExAsync(key, (int)expiresIn.TotalSeconds, ToBytes(value), token)).AwaitAsTrueTask(); + } + } + + Task ICacheClientAsync.FlushAllAsync(CancellationToken token) + => NativeAsync.FlushAllAsync(token).AsTask(); + + Task> ICacheClientAsync.GetAllAsync(IEnumerable keys, CancellationToken token) + { + return ExecAsync(r => + { + var keysArray = keys.ToArray(); + + return ((IRedisNativeClientAsync)r).MGetAsync(keysArray, token).Await((keyValues, state) => ProcessGetAllResult(state, keyValues), keysArray); + }).AsTask(); + } + + Task ICacheClientAsync.RemoveAsync(string key, CancellationToken token) + => NativeAsync.DelAsync(key, token).IsSuccessTaskAsync(); + + IAsyncEnumerable ICacheClientAsync.GetKeysByPatternAsync(string pattern, CancellationToken token) + => AsAsync().ScanAllKeysAsync(pattern, token: token); + + Task ICacheClientAsync.RemoveExpiredEntriesAsync(CancellationToken token) + { + //Redis automatically removed expired Cache Entries + return Task.CompletedTask; + } + + async Task IRemoveByPatternAsync.RemoveByPatternAsync(string pattern, 
CancellationToken token) + { + List buffer = null; + const int BATCH_SIZE = 1024; + await foreach (var key in AsAsync().ScanAllKeysAsync(pattern, token: token).WithCancellation(token).ConfigureAwait(false)) + { + (buffer ??= new List()).Add(key); + if (buffer.Count == BATCH_SIZE) + { + await NativeAsync.DelAsync(buffer.ToArray(), token).ConfigureAwait(false); + buffer.Clear(); + } + } + if (buffer is object && buffer.Count != 0) + { + await NativeAsync.DelAsync(buffer.ToArray(), token).ConfigureAwait(false); + } + } + + Task IRemoveByPatternAsync.RemoveByRegexAsync(string regex, CancellationToken token) + => AsAsync().RemoveByPatternAsync(RegexToGlob(regex), token); + + Task ICacheClientAsync.RemoveAllAsync(IEnumerable keys, CancellationToken token) + => ExecAsync(r => r.RemoveEntryAsync(keys.ToArray(), token)).AsTask(); + + Task ICacheClientAsync.IncrementAsync(string key, uint amount, CancellationToken token) + => ExecAsync(r => r.IncrementValueByAsync(key, (int)amount, token)).AsTask(); + + Task ICacheClientAsync.DecrementAsync(string key, uint amount, CancellationToken token) + => ExecAsync(r => r.DecrementValueByAsync(key, (int)amount, token)).AsTask(); + + + Task ICacheClientAsync.AddAsync(string key, T value, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), exists: false, token: token)).AsTask(); + + Task ICacheClientAsync.ReplaceAsync(string key, T value, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), exists: true, token: token)).AsTask(); + + Task ICacheClientAsync.AddAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + AssertNotInTransaction(); + + return ExecAsync(async r => + { + if (await r.AddAsync(key, value, token).ConfigureAwait(false)) + { + await r.ExpireEntryAtAsync(key, ConvertToServerDate(expiresAt), token).ConfigureAwait(false); + return true; + } + return false; + }).AsTask(); + } + + Task ICacheClientAsync.ReplaceAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + AssertNotInTransaction(); + + return ExecAsync(async r => + { + if (await r.ReplaceAsync(key, value, token).ConfigureAwait(false)) + { + await r.ExpireEntryAtAsync(key, ConvertToServerDate(expiresAt), token).ConfigureAwait(false); + return true; + } + return false; + }).AsTask(); + } + + Task ICacheClientAsync.AddAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), exists: false, token: token)).AsTask(); + + Task ICacheClientAsync.ReplaceAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), exists: true, token: token)).AsTask(); + + ValueTask IRedisClientAsync.DbSizeAsync(CancellationToken token) + => NativeAsync.DbSizeAsync(token); + + ValueTask> IRedisClientAsync.InfoAsync(CancellationToken token) + => NativeAsync.InfoAsync(token); + + ValueTask IRedisClientAsync.LastSaveAsync(CancellationToken token) + => NativeAsync.LastSaveAsync(token); + + async Task IEntityStoreAsync.GetByIdAsync(object id, CancellationToken token) + { + var key = UrnKey(id); + var valueString = await AsAsync().GetValueAsync(key, token).ConfigureAwait(false); + var value = JsonSerializer.DeserializeFromString(valueString); + return value; + } + + async Task> IEntityStoreAsync.GetByIdsAsync(ICollection ids, CancellationToken token) + { + if (ids == null || ids.Count == 0) + return 
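For illustration only (not part of this diff): a minimal sketch of how the ICacheClientAsync members implemented above might be consumed, assuming a Redis server on localhost:6379, that IRedisClientAsync exposes this cache surface, and that the key names are placeholders.

using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

static class CacheAsyncSketch
{
    public static async Task RunAsync()
    {
        // RedisClient implements IRedisClientAsync (and IAsyncDisposable) in this changeset
        await using IRedisClientAsync redis = new RedisClient("localhost", 6379);

        // Relative expiry: servers >= 2.6 get a single SET with a millisecond expiry, older servers fall back to SETEX
        await redis.SetAsync("greeting", "hello", TimeSpan.FromMinutes(5));

        // MGET-backed bulk read; missing keys come back as default values
        var values = await redis.GetAllAsync<string>(new[] { "greeting", "missing" });

        // SCAN + batched DEL, as implemented by RemoveByPatternAsync above
        await redis.RemoveByPatternAsync("session:*");
    }
}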
new List(); + + var urnKeys = ids.Cast().Map(UrnKey); + return await AsAsync().GetValuesAsync(urnKeys, token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.StoreAsync(T entity, CancellationToken token) + { + var urnKey = UrnKey(entity); + var valueString = JsonSerializer.SerializeToString(entity); + + await AsAsync().SetValueAsync(urnKey, valueString, token).ConfigureAwait(false); + await RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + + return entity; + } + + Task IEntityStoreAsync.StoreAllAsync(IEnumerable entities, CancellationToken token) + => StoreAllAsyncImpl(entities, token).AsTask(); + + internal async ValueTask StoreAllAsyncImpl(IEnumerable entities, CancellationToken token) + { + if (PrepareStoreAll(entities, out var keys, out var values, out var entitiesList)) + { + await NativeAsync.MSetAsync(keys, values, token).ConfigureAwait(false); + await RegisterTypeIdsAsync(entitiesList, token).ConfigureAwait(false); + } + } + + internal ValueTask RegisterTypeIdsAsync(IEnumerable values, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + var ids = values.Map(x => x.GetId().ToString()); + + if (this.Pipeline != null) + { + var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); + ids.ForEach(x => registeredTypeIdsWithinPipeline.Add(x)); + return default; + } + else + { + return AsAsync().AddRangeToSetAsync(typeIdsSetKey, ids, token); + } + } + + internal ValueTask RemoveTypeIdsByValueAsync(T value, CancellationToken token) => + RemoveTypeIdsByIdAsync(value.GetId().ToString(), token); + internal async ValueTask RemoveTypeIdsByValuesAsync(T[] values, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + if (this.Pipeline != null) + { + var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); + values.Each(x => registeredTypeIdsWithinPipeline.Remove(x.GetId().ToString())); + } + else + { + foreach (var x in values) + { + await AsAsync().RemoveItemFromSetAsync(typeIdsSetKey, x.GetId().ToString(), token).ConfigureAwait(false); + } + } + } + + internal async ValueTask RemoveTypeIdsByIdAsync(string id, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + if (this.Pipeline != null) + GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey).Remove(id); + else + { + await AsAsync().RemoveItemFromSetAsync(typeIdsSetKey, id, token).ConfigureAwait(false); + } + } + + internal async ValueTask RemoveTypeIdsByIdsAsync(string[] ids, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + if (this.Pipeline != null) + { + var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); + ids.Each(x => registeredTypeIdsWithinPipeline.Remove(x)); + } + else + { + foreach (var x in ids) + { + await AsAsync().RemoveItemFromSetAsync(typeIdsSetKey, x, token).ConfigureAwait(false); + } + } + } + + async Task IEntityStoreAsync.DeleteAsync(T entity, CancellationToken token) + { + var urnKey = UrnKey(entity); + await AsAsync().RemoveAsync(urnKey, token).ConfigureAwait(false); + await this.RemoveTypeIdsByValueAsync(entity, token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteByIdAsync(object id, CancellationToken token) + { + var urnKey = UrnKey(id); + await AsAsync().RemoveAsync(urnKey, token).ConfigureAwait(false); + await this.RemoveTypeIdsByIdAsync(id.ToString(), token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteByIdsAsync(ICollection ids, CancellationToken token) + { + if (ids == null || 
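For illustration only (not part of this diff): a sketch of the typed entity-store surface being added here (StoreAsync, GetByIdAsync and the Delete* members that follow). Customer is a hypothetical POCO; the convention it relies on is an Id property, which UrnKey() and the type-id set registration key on, and the generic method shapes are assumed from the implementations.

using System.Threading.Tasks;
using ServiceStack.Redis;

public class Customer
{
    public long Id { get; set; }
    public string Name { get; set; }
}

static class EntityStoreSketch
{
    public static async Task RunAsync(IRedisClientAsync redis)
    {
        // SET of the urn:customer:1 key plus SADD into the type's id set
        await redis.StoreAsync(new Customer { Id = 1, Name = "Ada" });

        // GET of the same urn key, deserialized from JSON
        Customer loaded = await redis.GetByIdAsync<Customer>(1);

        // DEL of the urn key plus SREM from the type's id set
        await redis.DeleteByIdAsync<Customer>(1);
    }
}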
ids.Count == 0) return; + + var idStrings = ids.Cast().Select(x => x.ToString()).ToArray(); + var urnKeys = idStrings.Select(UrnKey).ToArray(); + await AsAsync().RemoveEntryAsync(urnKeys, token).ConfigureAwait(false); + await this.RemoveTypeIdsByIdsAsync(idStrings, token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteAllAsync(CancellationToken token) + { + await DeleteAllAsync(0, RedisConfig.CommandKeysBatchSize, token).ConfigureAwait(false); + } + + private async Task DeleteAllAsync(ulong cursor, int batchSize, CancellationToken token) + { + var typeIdsSetKey = this.GetTypeIdsSetKey(); + var asyncClient = AsAsync(); + do + { + var scanResult = await NativeAsync.SScanAsync(typeIdsSetKey, cursor, batchSize, token: token).ConfigureAwait(false); + cursor = scanResult.Cursor; + var urnKeys = scanResult.Results.Select(id => UrnKey(id.FromUtf8Bytes())).ToArray(); + if (urnKeys.Length > 0) + { + await asyncClient.RemoveEntryAsync(urnKeys, token).ConfigureAwait(false); + } + } while (cursor != 0); + await asyncClient.RemoveEntryAsync(new[] { typeIdsSetKey }, token).ConfigureAwait(false); + } + + ValueTask> IRedisClientAsync.SearchSortedSetAsync(string setId, string start, string end, int? skip, int? take, CancellationToken token) + { + start = GetSearchStart(start); + end = GetSearchEnd(end); + + return NativeAsync.ZRangeByLexAsync(setId, start, end, skip, take, token).ToStringListAsync(); + } + + ValueTask IRedisClientAsync.SearchSortedSetCountAsync(string setId, string start, string end, CancellationToken token) + => NativeAsync.ZLexCountAsync(setId, GetSearchStart(start), GetSearchEnd(end), token); + + ValueTask IRedisClientAsync.RemoveRangeFromSortedSetBySearchAsync(string setId, string start, string end, CancellationToken token) + => NativeAsync.ZRemRangeByLexAsync(setId, GetSearchStart(start), GetSearchEnd(end), token); + + ValueTask IRedisClientAsync.TypeAsync(string key, CancellationToken token) + => NativeAsync.TypeAsync(key, token); + + ValueTask IRedisClientAsync.GetStringCountAsync(string key, CancellationToken token) + => NativeAsync.StrLenAsync(key, token); + + ValueTask IRedisClientAsync.GetSetCountAsync(string setId, CancellationToken token) + => NativeAsync.SCardAsync(setId, token); + + ValueTask IRedisClientAsync.GetListCountAsync(string listId, CancellationToken token) + => NativeAsync.LLenAsync(listId, token); + + ValueTask IRedisClientAsync.GetHashCountAsync(string hashId, CancellationToken token) + => NativeAsync.HLenAsync(hashId, token); + + async ValueTask IRedisClientAsync.ExecCachedLuaAsync(string scriptBody, Func> scriptSha1, CancellationToken token) + { + if (!CachedLuaSha1Map.TryGetValue(scriptBody, out var sha1)) + CachedLuaSha1Map[scriptBody] = sha1 = await AsAsync().LoadLuaScriptAsync(scriptBody, token).ConfigureAwait(false); + + try + { + return await scriptSha1(sha1).ConfigureAwait(false); + } + catch (RedisResponseException ex) + { + if (!ex.Message.StartsWith("NOSCRIPT")) + throw; + + CachedLuaSha1Map[scriptBody] = sha1 = await AsAsync().LoadLuaScriptAsync(scriptBody, token).ConfigureAwait(false); + return await scriptSha1(sha1).ConfigureAwait(false); + } + } + + ValueTask IRedisClientAsync.ExecLuaAsync(string luaBody, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalCommandAsync(luaBody, keys?.Length ?? 
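For illustration only (not part of this diff): a sketch of calling the ExecCachedLuaAsync member shown just above, which loads the script once via SCRIPT LOAD, reuses the cached SHA1, and transparently re-loads on a NOSCRIPT error. The script body and key name are placeholders, and the generic signature (a Func taking the SHA1 and returning a ValueTask) is assumed from the implementation.

using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

static class CachedLuaSketch
{
    const string Script = "return redis.call('GET', KEYS[1]) or ''";

    public static async Task<string> GetOrEmptyAsync(IRedisClientAsync redis, string key)
        => await redis.ExecCachedLuaAsync(Script,
            sha1 => redis.ExecLuaShaAsStringAsync(sha1, new[] { key }, Array.Empty<string>()));
}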
0, MergeAndConvertToBytes(keys, args), token).Await(data => data.ToRedisText()); + + ValueTask IRedisClientAsync.ExecLuaShaAsync(string sha1, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalShaCommandAsync(sha1, keys?.Length ?? 0, MergeAndConvertToBytes(keys, args), token).Await(data => data.ToRedisText()); + + ValueTask IRedisClientAsync.ExecLuaAsStringAsync(string luaBody, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalStrAsync(luaBody, keys?.Length ?? 0, MergeAndConvertToBytes(keys, args), token); + + ValueTask IRedisClientAsync.ExecLuaShaAsStringAsync(string sha1, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalShaStrAsync(sha1, keys?.Length ?? 0, MergeAndConvertToBytes(keys, args), token); + + ValueTask IRedisClientAsync.LoadLuaScriptAsync(string body, CancellationToken token) + => NativeAsync.ScriptLoadAsync(body, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.WriteAllAsync(IEnumerable entities, CancellationToken token) + => PrepareWriteAll(entities, out var keys, out var values) ? NativeAsync.MSetAsync(keys, values, token) : default; + + async ValueTask> IRedisClientAsync.GetAllItemsFromSetAsync(string setId, CancellationToken token) + { + var multiDataList = await NativeAsync.SMembersAsync(setId, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + async ValueTask IRedisClientAsync.AddRangeToSetAsync(string setId, List items, CancellationToken token) + { + if (await AddRangeToSetNeedsSendAsync(setId, items).ConfigureAwait(false)) + { + var uSetId = setId.ToUtf8Bytes(); + var pipeline = CreatePipelineCommand(); + foreach (var item in items) + { + pipeline.WriteCommand(Commands.SAdd, uSetId, item.ToUtf8Bytes()); + } + await pipeline.FlushAsync(token).ConfigureAwait(false); + + //the number of items after + _ = await pipeline.ReadAllAsIntsAsync(token).ConfigureAwait(false); + } + } + + async ValueTask AddRangeToSetNeedsSendAsync(string setId, List items) + { + if (setId.IsNullOrEmpty()) + throw new ArgumentNullException("setId"); + if (items == null) + throw new ArgumentNullException("items"); + if (items.Count == 0) + return false; + + if (this.Transaction is object || this.PipelineAsync is object) + { + var queueable = this.Transaction as IRedisQueueableOperationAsync + ?? 
this.Pipeline as IRedisQueueableOperationAsync; + + if (queueable == null) + throw new NotSupportedException("Cannot AddRangeToSetAsync() when Transaction is: " + this.Transaction.GetType().Name); + + //Complete the first QueuedCommand() + await AsAsync().AddItemToSetAsync(setId, items[0]).ConfigureAwait(false); + + //Add subsequent queued commands + for (var i = 1; i < items.Count; i++) + { + var item = items[i]; + queueable.QueueCommand(c => c.AddItemToSetAsync(setId, item)); + } + return false; + } + else + { + return true; + } + } + + ValueTask IRedisClientAsync.RemoveItemFromSetAsync(string setId, string item, CancellationToken token) + => NativeAsync.SRemAsync(setId, item.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.IncrementValueByAsync(string key, int count, CancellationToken token) + => NativeAsync.IncrByAsync(key, count, token); + + ValueTask IRedisClientAsync.IncrementValueByAsync(string key, long count, CancellationToken token) + => NativeAsync.IncrByAsync(key, count, token); + + ValueTask IRedisClientAsync.IncrementValueByAsync(string key, double count, CancellationToken token) + => NativeAsync.IncrByFloatAsync(key, count, token); + ValueTask IRedisClientAsync.IncrementValueAsync(string key, CancellationToken token) + => NativeAsync.IncrAsync(key, token); + + ValueTask IRedisClientAsync.DecrementValueAsync(string key, CancellationToken token) + => NativeAsync.DecrAsync(key, token); + + ValueTask IRedisClientAsync.DecrementValueByAsync(string key, int count, CancellationToken token) + => NativeAsync.DecrByAsync(key, count, token); + + async ValueTask IRedisClientAsync.GetServerRoleAsync(CancellationToken token) + { + if (AssertServerVersionNumber() >= 2812) + { + var text = await NativeAsync.RoleAsync(token).ConfigureAwait(false); + var roleName = text.Children[0].Text; + return ToServerRole(roleName); + } + + var info = await AsAsync().InfoAsync(token).ConfigureAwait(false); + info.TryGetValue("role", out var role); + return ToServerRole(role); + } + + ValueTask IRedisClientAsync.GetServerRoleInfoAsync(CancellationToken token) + => NativeAsync.RoleAsync(token); + + async ValueTask IRedisClientAsync.GetConfigAsync(string configItem, CancellationToken token) + { + var byteArray = await NativeAsync.ConfigGetAsync(configItem, token).ConfigureAwait(false); + return GetConfigParse(byteArray); + } + + ValueTask IRedisClientAsync.SetConfigAsync(string configItem, string value, CancellationToken token) + => NativeAsync.ConfigSetAsync(configItem, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.SaveConfigAsync(CancellationToken token) + => NativeAsync.ConfigRewriteAsync(token); + + ValueTask IRedisClientAsync.ResetInfoStatsAsync(CancellationToken token) + => NativeAsync.ConfigResetStatAsync(token); + + ValueTask IRedisClientAsync.GetClientAsync(CancellationToken token) + => NativeAsync.ClientGetNameAsync(token); + + ValueTask IRedisClientAsync.SetClientAsync(string name, CancellationToken token) + => NativeAsync.ClientSetNameAsync(name, token); + + ValueTask IRedisClientAsync.KillClientAsync(string address, CancellationToken token) + => NativeAsync.ClientKillAsync(address, token); + + ValueTask IRedisClientAsync.KillClientsAsync(string fromAddress, string withId, RedisClientType? ofType, bool? skipMe, CancellationToken token) + { + var typeString = ofType?.ToString().ToLower(); + var skipMeString = skipMe.HasValue ? (skipMe.Value ? 
"yes" : "no") : null; + return NativeAsync.ClientKillAsync(addr: fromAddress, id: withId, type: typeString, skipMe: skipMeString, token); + } + + async ValueTask>> IRedisClientAsync.GetClientsInfoAsync(CancellationToken token) + => GetClientsInfoParse(await NativeAsync.ClientListAsync(token).ConfigureAwait(false)); + + ValueTask IRedisClientAsync.PauseAllClientsAsync(TimeSpan duration, CancellationToken token) + => NativeAsync.ClientPauseAsync((int)duration.TotalMilliseconds, token); + + ValueTask> IRedisClientAsync.GetAllKeysAsync(CancellationToken token) + => AsAsync().SearchKeysAsync("*", token); + + ValueTask IRedisClientAsync.GetAndSetValueAsync(string key, string value, CancellationToken token) + => NativeAsync.GetSetAsync(key, value.ToUtf8Bytes(), token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.GetFromHashAsync(object id, CancellationToken token) + { + var key = UrnKey(id); + return (await AsAsync().GetAllEntriesFromHashAsync(key, token).ConfigureAwait(false)).ToJson().FromJson(); + } + + async ValueTask IRedisClientAsync.StoreAsHashAsync(T entity, CancellationToken token) + { + var key = UrnKey(entity); + var hash = ConvertToHashFn(entity); + await AsAsync().SetRangeInHashAsync(key, hash, token).ConfigureAwait(false); + await RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + } + + ValueTask> IRedisClientAsync.GetSortedEntryValuesAsync(string setId, int startingFrom, int endingAt, CancellationToken token) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; + return NativeAsync.SortAsync(setId, sortOptions, token).ToStringListAsync(); + } + + async IAsyncEnumerable IRedisClientAsync.ScanAllSetItemsAsync(string setId, string pattern, int pageSize, [EnumeratorCancellation] CancellationToken token) + { + var ret = new ScanResult(); + while (true) + { + ret = await (pattern != null // note ConfigureAwait is handled below + ? NativeAsync.SScanAsync(setId, ret.Cursor, pageSize, match: pattern, token: token) + : NativeAsync.SScanAsync(setId, ret.Cursor, pageSize, token: token) + ).ConfigureAwait(false); + + foreach (var key in ret.Results) + { + yield return key.FromUtf8Bytes(); + } + + if (ret.Cursor == 0) break; + } + } + + async IAsyncEnumerable> IRedisClientAsync.ScanAllSortedSetItemsAsync(string setId, string pattern, int pageSize, [EnumeratorCancellation] CancellationToken token) + { + var ret = new ScanResult(); + while (true) + { + ret = await (pattern != null // note ConfigureAwait is handled below + ? NativeAsync.ZScanAsync(setId, ret.Cursor, pageSize, match: pattern, token: token) + : NativeAsync.ZScanAsync(setId, ret.Cursor, pageSize, token: token) + ).ConfigureAwait(false); + + foreach (var entry in ret.AsItemsWithScores()) + { + yield return entry; + } + + if (ret.Cursor == 0) break; + } + } + + async IAsyncEnumerable> IRedisClientAsync.ScanAllHashEntriesAsync(string hashId, string pattern, int pageSize, [EnumeratorCancellation] CancellationToken token) + { + var ret = new ScanResult(); + while (true) + { + ret = await (pattern != null // note ConfigureAwait is handled below + ? 
NativeAsync.HScanAsync(hashId, ret.Cursor, pageSize, match: pattern, token: token) + : NativeAsync.HScanAsync(hashId, ret.Cursor, pageSize, token: token) + ).ConfigureAwait(false); + + foreach (var entry in ret.AsKeyValues()) + { + yield return entry; + } + + if (ret.Cursor == 0) break; + } + } + + ValueTask IRedisClientAsync.AddToHyperLogAsync(string key, string[] elements, CancellationToken token) + => NativeAsync.PfAddAsync(key, elements.Map(x => x.ToUtf8Bytes()).ToArray(), token); + + ValueTask IRedisClientAsync.CountHyperLogAsync(string key, CancellationToken token) + => NativeAsync.PfCountAsync(key, token); + + ValueTask IRedisClientAsync.MergeHyperLogsAsync(string toKey, string[] fromKeys, CancellationToken token) + => NativeAsync.PfMergeAsync(toKey, fromKeys, token); + + ValueTask IRedisClientAsync.AddGeoMemberAsync(string key, double longitude, double latitude, string member, CancellationToken token) + => NativeAsync.GeoAddAsync(key, longitude, latitude, member, token); + + ValueTask IRedisClientAsync.AddGeoMembersAsync(string key, RedisGeo[] geoPoints, CancellationToken token) + => NativeAsync.GeoAddAsync(key, geoPoints, token); + + ValueTask IRedisClientAsync.CalculateDistanceBetweenGeoMembersAsync(string key, string fromMember, string toMember, string unit, CancellationToken token) + => NativeAsync.GeoDistAsync(key, fromMember, toMember, unit, token); + + ValueTask IRedisClientAsync.GetGeohashesAsync(string key, string[] members, CancellationToken token) + => NativeAsync.GeoHashAsync(key, members, token); + + ValueTask> IRedisClientAsync.GetGeoCoordinatesAsync(string key, string[] members, CancellationToken token) + => NativeAsync.GeoPosAsync(key, members, token); + + async ValueTask IRedisClientAsync.FindGeoMembersInRadiusAsync(string key, double longitude, double latitude, double radius, string unit, CancellationToken token) + { + var results = await NativeAsync.GeoRadiusAsync(key, longitude, latitude, radius, unit, token: token).ConfigureAwait(false); + return ParseFindGeoMembersResult(results); + } + + ValueTask> IRedisClientAsync.FindGeoResultsInRadiusAsync(string key, double longitude, double latitude, double radius, string unit, int? count, bool? sortByNearest, CancellationToken token) + => NativeAsync.GeoRadiusAsync(key, longitude, latitude, radius, unit, withCoords: true, withDist: true, withHash: true, count: count, asc: sortByNearest, token: token); + + async ValueTask IRedisClientAsync.FindGeoMembersInRadiusAsync(string key, string member, double radius, string unit, CancellationToken token) + { + var results = await NativeAsync.GeoRadiusByMemberAsync(key, member, radius, unit, token: token).ConfigureAwait(false); + return ParseFindGeoMembersResult(results); + } + + ValueTask> IRedisClientAsync.FindGeoResultsInRadiusAsync(string key, string member, double radius, string unit, int? count, bool? 
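For illustration only (not part of this diff): the ScanAll*Async members above return IAsyncEnumerable, so results stream back one SSCAN/HSCAN page at a time instead of being buffered up front. A minimal consumer is sketched below; the set, hash and pattern names are placeholders, and the optional pattern/pageSize defaults are assumed.

using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

static class ScanSketch
{
    public static async Task DumpAsync(IRedisClientAsync redis)
    {
        // Streams SSCAN pages of the set, filtered server-side by the MATCH pattern
        await foreach (var member in redis.ScanAllSetItemsAsync("tags", pattern: "redis*"))
            Console.WriteLine(member);

        // Streams HSCAN pages as key/value pairs
        await foreach (var entry in redis.ScanAllHashEntriesAsync("user:1", pattern: "addr*"))
            Console.WriteLine($"{entry.Key} = {entry.Value}");
    }
}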
sortByNearest, CancellationToken token) + => NativeAsync.GeoRadiusByMemberAsync(key, member, radius, unit, withCoords: true, withDist: true, withHash: true, count: count, asc: sortByNearest, token: token); + + ValueTask IRedisClientAsync.CreateSubscriptionAsync(CancellationToken token) + => new RedisSubscription(this).AsValueTaskResult(); + + ValueTask IRedisClientAsync.PublishMessageAsync(string toChannel, string message, CancellationToken token) + => NativeAsync.PublishAsync(toChannel, message.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.MoveBetweenSetsAsync(string fromSetId, string toSetId, string item, CancellationToken token) + => NativeAsync.SMoveAsync(fromSetId, toSetId, item.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.SetContainsItemAsync(string setId, string item, CancellationToken token) + => NativeAsync.SIsMemberAsync(setId, item.ToUtf8Bytes(), token).IsSuccessAsync(); + + async ValueTask> IRedisClientAsync.GetIntersectFromSetsAsync(string[] setIds, CancellationToken token) + { + if (setIds.Length == 0) + return new HashSet(); + + var multiDataList = await NativeAsync.SInterAsync(setIds, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisClientAsync.StoreIntersectFromSetsAsync(string intoSetId, string[] setIds, CancellationToken token) + { + if (setIds.Length == 0) return default; + + return NativeAsync.SInterStoreAsync(intoSetId, setIds, token); + } + + async ValueTask> IRedisClientAsync.GetUnionFromSetsAsync(string[] setIds, CancellationToken token) + { + if (setIds.Length == 0) + return new HashSet(); + + var multiDataList = await NativeAsync.SUnionAsync(setIds, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisClientAsync.StoreUnionFromSetsAsync(string intoSetId, string[] setIds, CancellationToken token) + { + if (setIds.Length == 0) return default; + + return NativeAsync.SUnionStoreAsync(intoSetId, setIds, token); + } + + async ValueTask> IRedisClientAsync.GetDifferencesFromSetAsync(string fromSetId, string[] withSetIds, CancellationToken token) + { + if (withSetIds.Length == 0) + return new HashSet(); + + var multiDataList = await NativeAsync.SDiffAsync(fromSetId, withSetIds, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisClientAsync.StoreDifferencesFromSetAsync(string intoSetId, string fromSetId, string[] withSetIds, CancellationToken token) + { + if (withSetIds.Length == 0) return default; + + return NativeAsync.SDiffStoreAsync(intoSetId, fromSetId, withSetIds, token); + } + + ValueTask IRedisClientAsync.GetRandomItemFromSetAsync(string setId, CancellationToken token) + => NativeAsync.SRandMemberAsync(setId, token).FromUtf8BytesAsync(); + + ValueTask> IRedisClientAsync.GetAllItemsFromListAsync(string listId, CancellationToken token) + => NativeAsync.LRangeAsync(listId, FirstElement, LastElement, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromListAsync(string listId, int startingFrom, int endingAt, CancellationToken token) + => NativeAsync.LRangeAsync(listId, startingFrom, endingAt, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedListAsync(string listId, int startingFrom, int endingAt, CancellationToken token) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, SortAlpha = true }; + return AsAsync().GetSortedItemsFromListAsync(listId, sortOptions, token); + } + + ValueTask> IRedisClientAsync.GetSortedItemsFromListAsync(string listId, 
SortOptions sortOptions, CancellationToken token) + => NativeAsync.SortAsync(listId, sortOptions, token).ToStringListAsync(); + + async ValueTask IRedisClientAsync.AddRangeToListAsync(string listId, List values, CancellationToken token) + { + var pipeline = AddRangeToListPrepareNonFlushed(listId, values); + await pipeline.FlushAsync(token).ConfigureAwait(false); + + //the number of items after + _ = await pipeline.ReadAllAsIntsAsync(token).ConfigureAwait(false); + } + + ValueTask IRedisClientAsync.PrependItemToListAsync(string listId, string value, CancellationToken token) + => NativeAsync.LPushAsync(listId, value.ToUtf8Bytes(), token).Await(); + + async ValueTask IRedisClientAsync.PrependRangeToListAsync(string listId, List values, CancellationToken token) + { + var pipeline = PrependRangeToListPrepareNonFlushed(listId, values); + await pipeline.FlushAsync(token).ConfigureAwait(false); + + //the number of items after + _ = await pipeline.ReadAllAsIntsAsync(token).ConfigureAwait(false); + } + + ValueTask IRedisClientAsync.RemoveAllFromListAsync(string listId, CancellationToken token) + => NativeAsync.LTrimAsync(listId, LastElement, FirstElement, token); + + ValueTask IRedisClientAsync.RemoveStartFromListAsync(string listId, CancellationToken token) + => NativeAsync.LPopAsync(listId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.BlockingRemoveStartFromListAsync(string listId, TimeSpan? timeOut, CancellationToken token) + => NativeAsync.BLPopValueAsync(listId, (int)timeOut.GetValueOrDefault().TotalSeconds, token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.BlockingRemoveStartFromListsAsync(string[] listIds, TimeSpan? timeOut, CancellationToken token) + { + var value = await NativeAsync.BLPopValueAsync(listIds, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + if (value == null) + return null; + return new ItemRef { Id = value[0].FromUtf8Bytes(), Item = value[1].FromUtf8Bytes() }; + } + + ValueTask IRedisClientAsync.RemoveEndFromListAsync(string listId, CancellationToken token) + => NativeAsync.RPopAsync(listId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.TrimListAsync(string listId, int keepStartingFrom, int keepEndingAt, CancellationToken token) + => NativeAsync.LTrimAsync(listId, keepStartingFrom, keepEndingAt, token); + + ValueTask IRedisClientAsync.RemoveItemFromListAsync(string listId, string value, CancellationToken token) + => NativeAsync.LRemAsync(listId, 0, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.RemoveItemFromListAsync(string listId, string value, int noOfMatches, CancellationToken token) + => NativeAsync.LRemAsync(listId, noOfMatches, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.GetItemFromListAsync(string listId, int listIndex, CancellationToken token) + => NativeAsync.LIndexAsync(listId, listIndex, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.SetItemInListAsync(string listId, int listIndex, string value, CancellationToken token) + => NativeAsync.LSetAsync(listId, listIndex, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.EnqueueItemOnListAsync(string listId, string value, CancellationToken token) + => NativeAsync.LPushAsync(listId, value.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.DequeueItemFromListAsync(string listId, CancellationToken token) + => NativeAsync.RPopAsync(listId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.BlockingDequeueItemFromListAsync(string listId, TimeSpan?
timeOut, CancellationToken token) + => NativeAsync.BRPopValueAsync(listId, (int)timeOut.GetValueOrDefault().TotalSeconds, token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.BlockingDequeueItemFromListsAsync(string[] listIds, TimeSpan? timeOut, CancellationToken token) + { + var value = await NativeAsync.BRPopValueAsync(listIds, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + if (value == null) + return null; + return new ItemRef { Id = value[0].FromUtf8Bytes(), Item = value[1].FromUtf8Bytes() }; + } + + ValueTask IRedisClientAsync.PushItemToListAsync(string listId, string value, CancellationToken token) + => NativeAsync.RPushAsync(listId, value.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.PopItemFromListAsync(string listId, CancellationToken token) + => NativeAsync.RPopAsync(listId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.BlockingPopItemFromListAsync(string listId, TimeSpan? timeOut, CancellationToken token) + => NativeAsync.BRPopValueAsync(listId, (int)timeOut.GetValueOrDefault().TotalSeconds, token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.BlockingPopItemFromListsAsync(string[] listIds, TimeSpan? timeOut, CancellationToken token) + { + var value = await NativeAsync.BRPopValueAsync(listIds, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + if (value == null) + return null; + return new ItemRef { Id = value[0].FromUtf8Bytes(), Item = value[1].FromUtf8Bytes() }; + } + + ValueTask IRedisClientAsync.PopAndPushItemBetweenListsAsync(string fromListId, string toListId, CancellationToken token) + => NativeAsync.RPopLPushAsync(fromListId, toListId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.BlockingPopAndPushItemBetweenListsAsync(string fromListId, string toListId, TimeSpan? 
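For illustration only (not part of this diff): a minimal FIFO-queue sketch over the list members above, where EnqueueItemOnListAsync LPUSHes and BlockingDequeueItemFromListAsync BRPOPs with a server-side timeout; the list and payload names are placeholders.

using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

static class QueueSketch
{
    public static async Task ProduceConsumeAsync(IRedisClientAsync redis)
    {
        await redis.EnqueueItemOnListAsync("work:q", "job-1");

        // Blocks for up to 5 seconds; returns null when nothing arrives in time
        var job = await redis.BlockingDequeueItemFromListAsync("work:q", TimeSpan.FromSeconds(5));
        Console.WriteLine(job ?? "(timed out)");
    }
}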
timeOut, CancellationToken token) + => NativeAsync.BRPopLPushAsync(fromListId, toListId, (int)timeOut.GetValueOrDefault().TotalSeconds, token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.AddRangeToSortedSetAsync(string setId, List values, double score, CancellationToken token) + { + var pipeline = AddRangeToSortedSetPrepareNonFlushed(setId, values, score.ToFastUtf8Bytes()); + await pipeline.FlushAsync(token).ConfigureAwait(false); + + return await pipeline.ReadAllAsIntsHaveSuccessAsync(token).ConfigureAwait(false); + } + + async ValueTask IRedisClientAsync.AddRangeToSortedSetAsync(string setId, List values, long score, CancellationToken token) + { + var pipeline = AddRangeToSortedSetPrepareNonFlushed(setId, values, score.ToUtf8Bytes()); + await pipeline.FlushAsync(token).ConfigureAwait(false); + + return await pipeline.ReadAllAsIntsHaveSuccessAsync(token).ConfigureAwait(false); + } + + ValueTask IRedisClientAsync.RemoveItemFromSortedSetAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZRemAsync(setId, value.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.RemoveItemsFromSortedSetAsync(string setId, List values, CancellationToken token) + => NativeAsync.ZRemAsync(setId, values.Map(x => x.ToUtf8Bytes()).ToArray(), token); + + async ValueTask IRedisClientAsync.PopItemWithLowestScoreFromSortedSetAsync(string setId, CancellationToken token) + { + //TODO: this should be atomic + var topScoreItemBytes = await NativeAsync.ZRangeAsync(setId, FirstElement, 1, token).ConfigureAwait(false); + if (topScoreItemBytes.Length == 0) return null; + + await NativeAsync.ZRemAsync(setId, topScoreItemBytes[0], token).ConfigureAwait(false); + return topScoreItemBytes[0].FromUtf8Bytes(); + } + + async ValueTask IRedisClientAsync.PopItemWithHighestScoreFromSortedSetAsync(string setId, CancellationToken token) + { + //TODO: this should be atomic + var topScoreItemBytes = await NativeAsync.ZRevRangeAsync(setId, FirstElement, 1, token).ConfigureAwait(false); + if (topScoreItemBytes.Length == 0) return null; + + await NativeAsync.ZRemAsync(setId, topScoreItemBytes[0], token).ConfigureAwait(false); + return topScoreItemBytes[0].FromUtf8Bytes(); + } + + ValueTask IRedisClientAsync.SortedSetContainsItemAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZRankAsync(setId, value.ToUtf8Bytes(), token).Await(val => val != -1); + + ValueTask IRedisClientAsync.IncrementItemInSortedSetAsync(string setId, string value, double incrementBy, CancellationToken token) + => NativeAsync.ZIncrByAsync(setId, incrementBy, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.IncrementItemInSortedSetAsync(string setId, string value, long incrementBy, CancellationToken token) + => NativeAsync.ZIncrByAsync(setId, incrementBy, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.GetItemIndexInSortedSetAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZRankAsync(setId, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.GetItemIndexInSortedSetDescAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZRevRankAsync(setId, value.ToUtf8Bytes(), token); + + ValueTask> IRedisClientAsync.GetAllItemsFromSortedSetAsync(string setId, CancellationToken token) + => NativeAsync.ZRangeAsync(setId, FirstElement, LastElement, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetAllItemsFromSortedSetDescAsync(string setId, CancellationToken token) + => NativeAsync.ZRevRangeAsync(setId, 
FirstElement, LastElement, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetAsync(string setId, int fromRank, int toRank, CancellationToken token) + => NativeAsync.ZRangeAsync(setId, fromRank, toRank, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetDescAsync(string setId, int fromRank, int toRank, CancellationToken token) + => NativeAsync.ZRevRangeAsync(setId, fromRank, toRank, token).ToStringListAsync(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ValueTask> CreateSortedScoreMapAsync(ValueTask pending) + { + return pending.IsCompletedSuccessfully ? CreateSortedScoreMap(pending.Result).AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask pending) + => CreateSortedScoreMap(await pending.ConfigureAwait(false)); + } + + ValueTask> IRedisClientAsync.GetAllWithScoresFromSortedSetAsync(string setId, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRangeWithScoresAsync(setId, FirstElement, LastElement, token)); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetAsync(string setId, int fromRank, int toRank, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRangeWithScoresAsync(setId, fromRank, toRank, token)); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetDescAsync(string setId, int fromRank, int toRank, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRevRangeWithScoresAsync(setId, fromRank, toRank, token)); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByLowestScoreAsync(setId, fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetRangeFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, skip, take, token); + } + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => NativeAsync.ZRangeByScoreAsync(setId, fromScore, toScore, skip, take, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, long fromScore, long toScore, int? skip, int? 
take, CancellationToken token) + => NativeAsync.ZRangeByScoreAsync(setId, fromScore, toScore, skip, take, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByLowestScoreAsync(setId, fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetRangeWithScoresFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, skip, take, token); + } + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRangeByScoreWithScoresAsync(setId, fromScore, toScore, skip, take, token)); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, long fromScore, long toScore, int? skip, int? take, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRangeByScoreWithScoresAsync(setId, fromScore, toScore, skip, take, token)); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByHighestScoreAsync(setId, fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetRangeFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, skip, take, token); + } + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => NativeAsync.ZRevRangeByScoreAsync(setId, fromScore, toScore, skip, take, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, long fromScore, long toScore, int? skip, int? 
take, CancellationToken token) + => NativeAsync.ZRevRangeByScoreAsync(setId, fromScore, toScore, skip, take, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByHighestScoreAsync(setId, fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetRangeWithScoresFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, skip, take, token); + } + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRevRangeByScoreWithScoresAsync(setId, fromScore, toScore, skip, take, token)); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, long fromScore, long toScore, int? skip, int? 
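For illustration only (not part of this diff): a small leaderboard sketch over the sorted-set members above (a pipelined bulk ZADD, a ZINCRBY, then a reverse-ranked read with scores); the set and member names are placeholders.

using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using ServiceStack.Redis;

static class LeaderboardSketch
{
    public static async Task RunAsync(IRedisClientAsync redis)
    {
        // ZADD both members at score 0 in a single pipelined round-trip
        await redis.AddRangeToSortedSetAsync("leaders", new List<string> { "ada", "bob" }, 0d);

        // ZINCRBY ada by 42
        await redis.IncrementItemInSortedSetAsync("leaders", "ada", 42d);

        // Top 10 by rank, highest score first, with scores (ZREVRANGE ... WITHSCORES)
        IDictionary<string, double> top =
            await redis.GetRangeWithScoresFromSortedSetDescAsync("leaders", 0, 9);

        foreach (var entry in top)
            Console.WriteLine($"{entry.Key}: {entry.Value}");
    }
}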
take, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRevRangeByScoreWithScoresAsync(setId, fromScore, toScore, skip, take, token)); + + ValueTask IRedisClientAsync.RemoveRangeFromSortedSetAsync(string setId, int minRank, int maxRank, CancellationToken token) + => NativeAsync.ZRemRangeByRankAsync(setId, minRank, maxRank, token); + + ValueTask IRedisClientAsync.RemoveRangeFromSortedSetByScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => NativeAsync.ZRemRangeByScoreAsync(setId, fromScore, toScore, token); + + ValueTask IRedisClientAsync.RemoveRangeFromSortedSetByScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => NativeAsync.ZRemRangeByScoreAsync(setId, fromScore, toScore, token); + + ValueTask IRedisClientAsync.StoreIntersectFromSortedSetsAsync(string intoSetId, string[] setIds, CancellationToken token) + => NativeAsync.ZInterStoreAsync(intoSetId, setIds, token); + + ValueTask IRedisClientAsync.StoreIntersectFromSortedSetsAsync(string intoSetId, string[] setIds, string[] args, CancellationToken token) + => base.ZInterStoreAsync(intoSetId, setIds, args, token); + + ValueTask IRedisClientAsync.StoreUnionFromSortedSetsAsync(string intoSetId, string[] setIds, CancellationToken token) + => NativeAsync.ZUnionStoreAsync(intoSetId, setIds, token); + + ValueTask IRedisClientAsync.StoreUnionFromSortedSetsAsync(string intoSetId, string[] setIds, string[] args, CancellationToken token) + => base.ZUnionStoreAsync(intoSetId, setIds, args, token); + + ValueTask IRedisClientAsync.HashContainsEntryAsync(string hashId, string key, CancellationToken token) + => NativeAsync.HExistsAsync(hashId, key.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.SetEntryInHashIfNotExistsAsync(string hashId, string key, string value, CancellationToken token) + => NativeAsync.HSetNXAsync(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.SetRangeInHashAsync(string hashId, IEnumerable> keyValuePairs, CancellationToken token) + => SetRangeInHashPrepare(keyValuePairs, out var keys, out var values) ? 
NativeAsync.HMSetAsync(hashId, keys, values, token) : default; + + ValueTask IRedisClientAsync.IncrementValueInHashAsync(string hashId, string key, int incrementBy, CancellationToken token) + => NativeAsync.HIncrbyAsync(hashId, key.ToUtf8Bytes(), incrementBy, token); + + ValueTask IRedisClientAsync.IncrementValueInHashAsync(string hashId, string key, double incrementBy, CancellationToken token) + => NativeAsync.HIncrbyFloatAsync(hashId, key.ToUtf8Bytes(), incrementBy, token); + + ValueTask IRedisClientAsync.GetValueFromHashAsync(string hashId, string key, CancellationToken token) + => NativeAsync.HGetAsync(hashId, key.ToUtf8Bytes(), token).FromUtf8BytesAsync(); + + ValueTask> IRedisClientAsync.GetValuesFromHashAsync(string hashId, string[] keys, CancellationToken token) + { + if (keys.Length == 0) return new List().AsValueTaskResult(); + var keyBytes = ConvertToBytes(keys); + return NativeAsync.HMGetAsync(hashId, keyBytes, token).ToStringListAsync(); + } + + ValueTask IRedisClientAsync.RemoveEntryFromHashAsync(string hashId, string key, CancellationToken token) + => NativeAsync.HDelAsync(hashId, key.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask> IRedisClientAsync.GetHashKeysAsync(string hashId, CancellationToken token) + => NativeAsync.HKeysAsync(hashId, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetHashValuesAsync(string hashId, CancellationToken token) + => NativeAsync.HValsAsync(hashId, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetAllEntriesFromHashAsync(string hashId, CancellationToken token) + => NativeAsync.HGetAllAsync(hashId, token).Await(ret => ret.ToStringDictionary()); + + ValueTask IRedisClientAsync.ExecLuaAsync(string body, string[] args, CancellationToken token) + => NativeAsync.EvalCommandAsync(body, 0, args.ToMultiByteArray(), token).Await(ret => ret.ToRedisText()); + + ValueTask IRedisClientAsync.ExecLuaShaAsync(string sha1, string[] args, CancellationToken token) + => NativeAsync.EvalShaCommandAsync(sha1, 0, args.ToMultiByteArray(), token).Await(ret => ret.ToRedisText()); + + ValueTask IRedisClientAsync.ExecLuaAsStringAsync(string body, string[] args, CancellationToken token) + => NativeAsync.EvalStrAsync(body, 0, args.ToMultiByteArray(), token); + + ValueTask IRedisClientAsync.ExecLuaShaAsStringAsync(string sha1, string[] args, CancellationToken token) + => NativeAsync.EvalShaStrAsync(sha1, 0, args.ToMultiByteArray(), token); + + ValueTask IRedisClientAsync.ExecLuaAsIntAsync(string body, string[] args, CancellationToken token) + => NativeAsync.EvalIntAsync(body, 0, args.ToMultiByteArray(), token); + + ValueTask IRedisClientAsync.ExecLuaAsIntAsync(string body, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalIntAsync(body, keys.Length, MergeAndConvertToBytes(keys, args), token); + + ValueTask IRedisClientAsync.ExecLuaShaAsIntAsync(string sha1, string[] args, CancellationToken token) + => NativeAsync.EvalShaIntAsync(sha1, 0, args.ToMultiByteArray(), token); + + ValueTask IRedisClientAsync.ExecLuaShaAsIntAsync(string sha1, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalShaIntAsync(sha1, keys.Length, MergeAndConvertToBytes(keys, args), token); + + ValueTask> IRedisClientAsync.ExecLuaAsListAsync(string body, string[] args, CancellationToken token) + => NativeAsync.EvalAsync(body, 0, args.ToMultiByteArray(), token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.ExecLuaAsListAsync(string body, string[] keys, string[] args, CancellationToken token) + => 
NativeAsync.EvalAsync(body, keys.Length, MergeAndConvertToBytes(keys, args), token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.ExecLuaShaAsListAsync(string sha1, string[] args, CancellationToken token) + => NativeAsync.EvalShaAsync(sha1, 0, args.ToMultiByteArray(), token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.ExecLuaShaAsListAsync(string sha1, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalShaAsync(sha1, keys.Length, MergeAndConvertToBytes(keys, args), token).ToStringListAsync(); + + ValueTask IRedisClientAsync.CalculateSha1Async(string luaBody, CancellationToken token) + => CalculateSha1(luaBody).AsValueTaskResult(); + + async ValueTask IRedisClientAsync.HasLuaScriptAsync(string sha1Ref, CancellationToken token) + { + var map = await AsAsync().WhichLuaScriptsExistsAsync(new[] { sha1Ref }, token).ConfigureAwait(false); + return map[sha1Ref]; + } + + async ValueTask> IRedisClientAsync.WhichLuaScriptsExistsAsync(string[] sha1Refs, CancellationToken token) + { + var intFlags = await NativeAsync.ScriptExistsAsync(sha1Refs.ToMultiByteArray()).ConfigureAwait(false); + return WhichLuaScriptsExistsParseResult(sha1Refs, intFlags); + } + + ValueTask IRedisClientAsync.RemoveAllLuaScriptsAsync(CancellationToken token) + => NativeAsync.ScriptFlushAsync(token); + + ValueTask IRedisClientAsync.KillRunningLuaScriptAsync(CancellationToken token) + => NativeAsync.ScriptKillAsync(token); + + ValueTask IRedisClientAsync.CustomAsync(params object[] cmdWithArgs) + => AsAsync().CustomAsync(cmdWithArgs, token: default); + + ValueTask IRedisClientAsync.RemoveEntryAsync(params string[] args) + => AsAsync().RemoveEntryAsync(args, token: default); + + ValueTask IRedisClientAsync.AddToHyperLogAsync(string key, params string[] elements) + => AsAsync().AddToHyperLogAsync(key, elements, token: default); + + ValueTask IRedisClientAsync.MergeHyperLogsAsync(string toKey, params string[] fromKeys) + => AsAsync().MergeHyperLogsAsync(toKey, fromKeys, token: default); + + ValueTask IRedisClientAsync.AddGeoMembersAsync(string key, params RedisGeo[] geoPoints) + => AsAsync().AddGeoMembersAsync(key, geoPoints, token: default); + + ValueTask IRedisClientAsync.GetGeohashesAsync(string key, params string[] members) + => AsAsync().GetGeohashesAsync(key, members, token: default); + + ValueTask> IRedisClientAsync.GetGeoCoordinatesAsync(string key, params string[] members) + => AsAsync().GetGeoCoordinatesAsync(key, members, token: default); + + ValueTask IRedisClientAsync.WatchAsync(params string[] keys) + => AsAsync().WatchAsync(keys, token: default); + + ValueTask> IRedisClientAsync.GetIntersectFromSetsAsync(params string[] setIds) + => AsAsync().GetIntersectFromSetsAsync(setIds, token: default); + + ValueTask IRedisClientAsync.StoreIntersectFromSetsAsync(string intoSetId, params string[] setIds) + => AsAsync().StoreIntersectFromSetsAsync(intoSetId, setIds, token: default); + + ValueTask> IRedisClientAsync.GetUnionFromSetsAsync(params string[] setIds) + => AsAsync().GetUnionFromSetsAsync(setIds, token: default); + + ValueTask IRedisClientAsync.StoreUnionFromSetsAsync(string intoSetId, params string[] setIds) + => AsAsync().StoreUnionFromSetsAsync(intoSetId, setIds, token: default); + + ValueTask> IRedisClientAsync.GetDifferencesFromSetAsync(string fromSetId, params string[] withSetIds) + => AsAsync().GetDifferencesFromSetAsync(fromSetId, withSetIds, token: default); + + ValueTask IRedisClientAsync.StoreDifferencesFromSetAsync(string intoSetId, string fromSetId, params string[] 
withSetIds) + => AsAsync().StoreDifferencesFromSetAsync(intoSetId, fromSetId, withSetIds, token: default); + + ValueTask IRedisClientAsync.StoreIntersectFromSortedSetsAsync(string intoSetId, params string[] setIds) + => AsAsync().StoreIntersectFromSortedSetsAsync(intoSetId, setIds, token: default); + + ValueTask IRedisClientAsync.StoreUnionFromSortedSetsAsync(string intoSetId, params string[] setIds) + => AsAsync().StoreUnionFromSortedSetsAsync(intoSetId, setIds, token: default); + + ValueTask> IRedisClientAsync.GetValuesFromHashAsync(string hashId, params string[] keys) + => AsAsync().GetValuesFromHashAsync(hashId, keys, token: default); + + ValueTask IRedisClientAsync.ExecLuaAsync(string body, params string[] args) + => AsAsync().ExecLuaAsync(body, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaShaAsync(string sha1, params string[] args) + => AsAsync().ExecLuaShaAsync(sha1, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaAsStringAsync(string luaBody, params string[] args) + => AsAsync().ExecLuaAsStringAsync(luaBody, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaShaAsStringAsync(string sha1, params string[] args) + => AsAsync().ExecLuaShaAsStringAsync(sha1, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaAsIntAsync(string luaBody, params string[] args) + => AsAsync().ExecLuaAsIntAsync(luaBody, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaShaAsIntAsync(string sha1, params string[] args) + => AsAsync().ExecLuaShaAsIntAsync(sha1, args, token: default); + + ValueTask> IRedisClientAsync.ExecLuaAsListAsync(string luaBody, params string[] args) + => AsAsync().ExecLuaAsListAsync(luaBody, args, token: default); + + ValueTask> IRedisClientAsync.ExecLuaShaAsListAsync(string sha1, params string[] args) + => AsAsync().ExecLuaShaAsListAsync(sha1, args, token: default); + + ValueTask> IRedisClientAsync.WhichLuaScriptsExistsAsync(params string[] sha1Refs) + => AsAsync().WhichLuaScriptsExistsAsync(sha1Refs, token: default); + } +} diff --git a/src/ServiceStack.Redis/RedisClient.ICacheClient.cs b/src/ServiceStack.Redis/RedisClient.ICacheClient.cs index 70af82c5..21bd3372 100644 --- a/src/ServiceStack.Redis/RedisClient.ICacheClient.cs +++ b/src/ServiceStack.Redis/RedisClient.ICacheClient.cs @@ -1,209 +1,247 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; using System.Linq; using System.Text; -using ServiceStack.CacheAccess; +using ServiceStack.Caching; using ServiceStack.Text; namespace ServiceStack.Redis { - public partial class RedisClient - : ICacheClient - { - public void RemoveAll(IEnumerable keys) - { - RemoveEntry(keys.ToArray()); - } - - public T Get(string key) - { - return typeof(T) == typeof(byte[]) - ? 
(T)(object)base.Get(key) - : JsonSerializer.DeserializeFromString(GetValue(key)); - } - - public long Increment(string key, uint amount) - { - return IncrementValueBy(key, (int)amount); - } - - public long Decrement(string key, uint amount) - { - return DecrementValueBy(key, (int)amount); - } - - public bool Add(string key, T value) - { - var bytesValue = value as byte[]; - if (bytesValue != null) - { - return base.SetNX(key, bytesValue) == Success; - } - - var valueString = JsonSerializer.SerializeToString(value); - return SetEntryIfNotExists(key, valueString); - } - - public bool Set(string key, T value) - { - var bytesValue = value as byte[]; - if (bytesValue != null) - { - base.Set(key, bytesValue); - return true; - } - - var valueString = JsonSerializer.SerializeToString(value); - SetEntry(key, valueString); - return true; - } - - public bool Replace(string key, T value) - { - var exists = ContainsKey(key); - if (!exists) return false; - - var bytesValue = value as byte[]; - if (bytesValue != null) - { - base.Set(key, bytesValue); - return true; - } - - SetEntry(key, JsonSerializer.SerializeToString(value)); - return true; - } - - public bool Add(string key, T value, DateTime expiresAt) - { - if (Add(key, value)) - { - ExpireEntryAt(key, expiresAt); - return true; - } - return false; - } - - public bool Set(string key, T value, TimeSpan expiresIn) - { - var bytesValue = value as byte[]; - if (bytesValue != null) - { - base.SetEx(key, (int)expiresIn.TotalSeconds, bytesValue); - return true; - } - - var valueString = JsonSerializer.SerializeToString(value); - SetEntry(key, valueString, expiresIn); - return true; - } - - public bool Set(string key, T value, DateTime expiresAt) - { - Set(key, value); - ExpireEntryAt(key, expiresAt); - return true; - } - - public bool Replace(string key, T value, DateTime expiresAt) - { - if (Replace(key, value)) - { - ExpireEntryAt(key, expiresAt); - return true; - } - return false; - } - - public bool Add(string key, T value, TimeSpan expiresIn) - { - if (Add(key, value)) - { - ExpireEntryIn(key, expiresIn); - return true; - } - return false; - } - - public bool Replace(string key, T value, TimeSpan expiresIn) - { - if (Replace(key, value)) - { - ExpireEntryIn(key, expiresIn); - return true; - } - return false; - } - - public IDictionary GetAll(IEnumerable keys) - { - var keysArray = keys.ToArray(); - var keyValues = MGet(keysArray); - var results = new Dictionary(); - var isBytes = typeof(T) == typeof(byte[]); - - var i = 0; - foreach (var keyValue in keyValues) - { - var key = keysArray[i++]; - - if (keyValue == null) - { - results[key] = default(T); - continue; - } - - if (isBytes) - { - results[key] = (T)(object)keyValue; - } - else - { - var keyValueString = Encoding.UTF8.GetString(keyValue); - results[key] = JsonSerializer.DeserializeFromString(keyValueString); - } - } - return results; - } - - public void SetAll(IDictionary values) - { - var keys = values.Keys.ToArray(); - var valBytes = new byte[values.Count][]; + public partial class RedisClient + : ICacheClient + { + public T Exec(Func action) + { + using (JsConfig.With(new Text.Config { ExcludeTypeInfo = false })) + { + return action(this); + } + } + + public void Exec(Action action) + { + using (JsConfig.With(new Text.Config { ExcludeTypeInfo = false })) + { + action(this); + } + } + + public void RemoveAll(IEnumerable keys) + { + Exec(r => r.RemoveEntry(keys.ToArray())); + } + + public T Get(string key) + { + return Exec(r => + typeof(T) == typeof(byte[]) + ? 
(T)(object)r.Get(key) + : JsonSerializer.DeserializeFromString(r.GetValue(key)) + ); + } + + //Looking up Dictionary for type is faster than HashSet. + private static readonly Dictionary numericTypes = new Dictionary { + { typeof(byte), true}, + { typeof(sbyte), true}, + { typeof(short), true}, + { typeof(ushort), true}, + { typeof(int), true}, + { typeof(uint), true}, + { typeof(long), true}, + { typeof(ulong), true}, + { typeof(double), true}, + { typeof(float), true}, + { typeof(decimal), true} + }; + + private static byte[] ToBytes(T value) + { + var bytesValue = value as byte[]; + if (bytesValue == null && (numericTypes.ContainsKey(typeof(T)) || !Equals(value, default(T)))) + bytesValue = value.ToJson().ToUtf8Bytes(); + return bytesValue; + } + + public long Increment(string key, uint amount) + { + return Exec(r => r.IncrementValueBy(key, (int)amount)); + } + + public long Decrement(string key, uint amount) + { + return Exec(r => DecrementValueBy(key, (int)amount)); + } + + public bool Add(string key, T value) + { + return Exec(r => r.Set(key, ToBytes(value), exists: false)); + } + + public bool Set(string key, T value) + { + Exec(r => ((RedisNativeClient)r).Set(key, ToBytes(value))); + return true; + } + + public bool Replace(string key, T value) + { + return Exec(r => r.Set(key, ToBytes(value), exists: true)); + } + + public bool Add(string key, T value, DateTime expiresAt) + { + AssertNotInTransaction(); + + return Exec(r => + { + if (r.Add(key, value)) + { + r.ExpireEntryAt(key, ConvertToServerDate(expiresAt)); + return true; + } + return false; + }); + } + + public bool Add(string key, T value, TimeSpan expiresIn) + { + return Exec(r => r.Set(key, ToBytes(value), exists: false, expiryMs: (long)expiresIn.TotalMilliseconds)); + } + + public bool Set(string key, T value, TimeSpan expiresIn) + { + if (AssertServerVersionNumber() >= 2600) + { + Exec(r => r.Set(key, ToBytes(value), 0, expiryMs: (long)expiresIn.TotalMilliseconds)); + } + else + { + Exec(r => r.SetEx(key, (int)expiresIn.TotalSeconds, ToBytes(value))); + } + return true; + } + + public bool Set(string key, T value, DateTime expiresAt) + { + AssertNotInTransaction(); + + Exec(r => + { + Set(key, value); + ExpireEntryAt(key, ConvertToServerDate(expiresAt)); + }); + return true; + } + + public bool Replace(string key, T value, DateTime expiresAt) + { + AssertNotInTransaction(); + + return Exec(r => + { + if (r.Replace(key, value)) + { + r.ExpireEntryAt(key, ConvertToServerDate(expiresAt)); + return true; + } + return false; + }); + } + + public bool Replace(string key, T value, TimeSpan expiresIn) + { + return Exec(r => r.Set(key, ToBytes(value), exists: true, expiryMs: (long)expiresIn.TotalMilliseconds)); + } + + public IDictionary GetAll(IEnumerable keys) + { + return Exec(r => + { + var keysArray = keys.ToArray(); + var keyValues = r.MGet(keysArray); + + return ProcessGetAllResult(keysArray, keyValues); + }); + } + + private static IDictionary ProcessGetAllResult(string[] keysArray, byte[][] keyValues) + { + var results = new Dictionary(); var isBytes = typeof(T) == typeof(byte[]); - var i = 0; + var i = 0; + foreach (var keyValue in keyValues) + { + var key = keysArray[i++]; + + if (keyValue == null) + { + results[key] = default(T); + continue; + } + + if (isBytes) + { + results[key] = (T)(object)keyValue; + } + else + { + var keyValueString = Encoding.UTF8.GetString(keyValue); + results[key] = JsonSerializer.DeserializeFromString(keyValueString); + } + } + return results; + } + + public void SetAll(IDictionary values) + 
{ + if (values.Count != 0) + { + Exec(r => + { + // need to do this inside Exec for the JSON config bits + GetSetAllBytesTyped(values, out var keys, out var valBytes); + r.MSet(keys, valBytes); + }); + } + } + + private static void GetSetAllBytesTyped(IDictionary values, out string[] keys, out byte[][] valBytes) + { + keys = values.Keys.ToArray(); + valBytes = new byte[values.Count][]; + var isBytes = typeof(T) == typeof(byte[]); + + var i = 0; foreach (var value in values.Values) - { + { if (!isBytes) { var t = JsonSerializer.SerializeToString(value); if (t != null) valBytes[i] = t.ToUtf8Bytes(); else - valBytes[i] = new byte[]{}; + valBytes[i] = new byte[] { }; } else - valBytes[i] = (byte[])(object)value ?? new byte[]{}; - i++; - } - - MSet(keys, valBytes); + valBytes[i] = (byte[])(object)value ?? new byte[] { }; + i++; + } } - - } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient.cs b/src/ServiceStack.Redis/RedisClient.cs index 40fb99a4..1908f322 100644 --- a/src/ServiceStack.Redis/RedisClient.cs +++ b/src/ServiceStack.Redis/RedisClient.cs @@ -1,25 +1,26 @@ // -// http://code.google.com/p/servicestack/wiki/ServiceStackRedis +// https://github.com/ServiceStack/ServiceStack.Redis/ // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections; +using System.Collections.Concurrent; using System.Collections.Generic; using System.Linq; using System.Text; -using ServiceStack.Common.Extensions; -using ServiceStack.Common.Utils; +using System.Threading; using ServiceStack.Redis.Generic; using ServiceStack.Redis.Pipeline; using ServiceStack.Text; +using ServiceStack.Caching; namespace ServiceStack.Redis { @@ -31,15 +32,20 @@ namespace ServiceStack.Redis /// RedisClient.Sets => ICollection[string] /// public partial class RedisClient - : RedisNativeClient, IRedisClient + : RedisNativeClient, IRedisClient, IRemoveByPattern // IRemoveByPattern is implemented in this file. { public RedisClient() { Init(); } + internal static HashSet __uniqueTypes = new HashSet(); + public static Func NewFactoryFn = () => new RedisClient(); + public static Func> ConvertToHashFn = + x => x.ToJson().FromJson>(); + /// /// Creates a new instance of the Redis Client from NewFactoryFn. /// @@ -54,13 +60,19 @@ public RedisClient(string host) Init(); } + public RedisClient(RedisEndpoint config) + : base(config) + { + Init(); + } + public RedisClient(string host, int port) : base(host, port) { Init(); } - public RedisClient(string host, int port, string password = null, int db = DefaultDb) + public RedisClient(string host, int port, string password = null, long db = RedisConfig.DefaultDb) : base(host, port, password, db) { Init(); @@ -69,7 +81,7 @@ public RedisClient(string host, int port, string password = null, int db = Defau public RedisClient(Uri uri) : base(uri.Host, uri.Port) { - var password = !string.IsNullOrEmpty(uri.UserInfo) ? uri.UserInfo.Split(':')[1] : null; + var password = !string.IsNullOrEmpty(uri.UserInfo) ? 
uri.UserInfo.Split(':').Last() : null; Password = password; Init(); } @@ -85,71 +97,154 @@ public void Init() public string this[string key] { get { return GetValue(key); } - set { SetEntry(key, value); } + set { SetValue(key, value); } } - public string GetTypeSequenceKey() + public override void OnConnected() { } + + public RedisText Custom(params object[] cmdWithArgs) { - return string.Concat(NamespacePrefix, "seq:", typeof(T).Name); + var data = base.RawCommand(cmdWithArgs); + var ret = data.ToRedisText(); + return ret; } - public string GetTypeIdsSetKey() + public DateTime ConvertToServerDate(DateTime expiresAt) => expiresAt; + + public string GetTypeSequenceKey() => string.Concat(NamespacePrefix, "seq:", typeof(T).Name); + + public string GetTypeIdsSetKey() => string.Concat(NamespacePrefix, "ids:", typeof(T).Name); + + public string GetTypeIdsSetKey(Type type) => string.Concat(NamespacePrefix, "ids:", type.Name); + + public void RewriteAppendOnlyFileAsync() => base.BgRewriteAof(); + + public List GetAllKeys() => SearchKeys("*"); + + public void SetValue(string key, string value) { - return string.Concat(NamespacePrefix, "ids:", typeof(T).Name); + var bytesValue = value?.ToUtf8Bytes(); + base.Set(key, bytesValue); } - public string GetTypeIdsSetKey(Type type) + public bool SetValue(byte[] key, byte[] value, TimeSpan expireIn) { - return string.Concat(NamespacePrefix, "ids:", type.Name); + if (AssertServerVersionNumber() >= 2600) + { + Exec(r => r.Set(key, value, 0, expiryMs: (long)expireIn.TotalMilliseconds)); + } + else + { + Exec(r => r.SetEx(key, (int)expireIn.TotalSeconds, value)); + } + + return true; } - public void RewriteAppendOnlyFileAsync() + public void SetValue(string key, string value, TimeSpan expireIn) { - base.BgRewriteAof(); + var bytesValue = value?.ToUtf8Bytes(); + + if (AssertServerVersionNumber() >= 2610) + { + if (expireIn.Milliseconds > 0) + base.Set(key, bytesValue, 0, (long)expireIn.TotalMilliseconds); + else + base.Set(key, bytesValue, (int)expireIn.TotalSeconds, 0); + } + else + { + SetEx(key, (int)expireIn.TotalSeconds, bytesValue); + } } - public List GetAllKeys() + public bool SetValueIfExists(string key, string value) { - return SearchKeys("*"); + var bytesValue = value?.ToUtf8Bytes(); + return base.Set(key, bytesValue, exists: true); } - public void SetEntry(string key, string value) + public bool SetValueIfNotExists(string key, string value) { - var bytesValue = value != null - ? 
value.ToUtf8Bytes() - : null; + var bytesValue = value?.ToUtf8Bytes(); + return base.Set(key, bytesValue, exists: false); + } - base.Set(key, bytesValue); + public bool SetValueIfExists(string key, string value, TimeSpan expireIn) + { + var bytesValue = value?.ToUtf8Bytes(); + + if (expireIn.Milliseconds > 0) + return base.Set(key, bytesValue, exists: true, expiryMs: (long)expireIn.TotalMilliseconds); + else + return base.Set(key, bytesValue, exists: true, expirySeconds: (int)expireIn.TotalSeconds); + } + + public bool SetValueIfNotExists(string key, string value, TimeSpan expireIn) + { + var bytesValue = value?.ToUtf8Bytes(); + + if (expireIn.Milliseconds > 0) + return base.Set(key, bytesValue, exists: false, expiryMs: (long)expireIn.TotalMilliseconds); + else + return base.Set(key, bytesValue, exists: false, expirySeconds: (int)expireIn.TotalSeconds); + } + + public void SetValues(Dictionary map) + { + SetAll(map); } public void SetAll(IEnumerable keys, IEnumerable values) { - if (keys == null || values == null) return; + if (GetSetAllBytes(keys, values, out var keyBytes, out var valBytes)) + { + base.MSet(keyBytes, valBytes); + } + } + + bool GetSetAllBytes(IEnumerable keys, IEnumerable values, out byte[][] keyBytes, out byte[][] valBytes) + { + keyBytes = valBytes = default; + if (keys == null || values == null) return false; var keyArray = keys.ToArray(); var valueArray = values.ToArray(); if (keyArray.Length != valueArray.Length) throw new Exception("Key length != Value Length. {0}/{1}".Fmt(keyArray.Length, valueArray.Length)); - if (keyArray.Length == 0) return; + if (keyArray.Length == 0) return false; - var keyBytes = new byte[keyArray.Length][]; - var valBytes = new byte[keyArray.Length][]; + keyBytes = new byte[keyArray.Length][]; + valBytes = new byte[keyArray.Length][]; for (int i = 0; i < keyArray.Length; i++) { keyBytes[i] = keyArray[i].ToUtf8Bytes(); valBytes[i] = valueArray[i].ToUtf8Bytes(); } - base.MSet(keyBytes, valBytes); + return true; } public void SetAll(Dictionary map) { - if (map == null || map.Count == 0) return; + if (GetSetAllBytes(map, out var keyBytes, out var valBytes)) + { + base.MSet(keyBytes, valBytes); + } + } + + private static bool GetSetAllBytes(IDictionary map, out byte[][] keyBytes, out byte[][] valBytes) + { + if (map == null || map.Count == 0) + { + keyBytes = null; + valBytes = null; + return false; + } - var keyBytes = new byte[map.Count][]; - var valBytes = new byte[map.Count][]; + keyBytes = new byte[map.Count][]; + valBytes = new byte[map.Count][]; var i = 0; foreach (var key in map.Keys) @@ -159,36 +254,16 @@ public void SetAll(Dictionary map) valBytes[i] = val.ToUtf8Bytes(); i++; } - - base.MSet(keyBytes, valBytes); - } - - public void SetEntry(string key, string value, TimeSpan expireIn) - { - var bytesValue = value != null - ? value.ToUtf8Bytes() - : null; - - SetEx(key, (int)expireIn.TotalSeconds, bytesValue); - } - - public bool SetEntryIfNotExists(string key, string value) - { - if (value == null) - throw new ArgumentNullException("value"); - - return SetNX(key, value.ToUtf8Bytes()) == Success; + return true; } public string GetValue(string key) { var bytes = Get(key); - return bytes == null - ? 
null - : bytes.FromUtf8Bytes(); + return bytes?.FromUtf8Bytes(); } - public string GetAndSetEntry(string key, string value) + public string GetAndSetValue(string key, string value) { return GetSet(key, value.ToUtf8Bytes()).FromUtf8Bytes(); } @@ -203,6 +278,11 @@ public bool Remove(string key) return Del(key) == Success; } + public bool Remove(byte[] key) + { + return Del(key) == Success; + } + public bool RemoveEntry(params string[] keys) { if (keys.Length == 0) return false; @@ -220,6 +300,16 @@ public long IncrementValueBy(string key, int count) return IncrBy(key, count); } + public long IncrementValueBy(string key, long count) + { + return IncrBy(key, count); + } + + public double IncrementValueBy(string key, double count) + { + return IncrByFloat(key, count); + } + public long DecrementValue(string key) { return Decr(key); @@ -230,7 +320,7 @@ public long DecrementValueBy(string key, int count) return DecrBy(key, count); } - public int AppendToValue(string key, string value) + public long AppendToValue(string key, string value) { return base.Append(key, value.ToUtf8Bytes()); } @@ -240,9 +330,9 @@ public void RenameKey(string fromName, string toName) base.Rename(fromName, toName); } - public string GetSubstring(string key, int fromIndex, int toIndex) + public long GetStringCount(string key) { - return base.Substr(key, fromIndex, toIndex).FromUtf8Bytes(); + return base.StrLen(key); } public string GetRandomKey() @@ -252,28 +342,71 @@ public string GetRandomKey() public bool ExpireEntryIn(string key, TimeSpan expireIn) { + if (UseMillisecondExpiration(expireIn)) + { + return PExpire(key, (long)expireIn.TotalMilliseconds); + } + + return Expire(key, (int)expireIn.TotalSeconds); + } + + private bool UseMillisecondExpiration(TimeSpan value) + + => AssertServerVersionNumber() >= 2600 && value.Milliseconds > 0; + + public bool ExpireEntryIn(byte[] key, TimeSpan expireIn) + { + if (UseMillisecondExpiration(expireIn)) + { + return PExpire(key, (long)expireIn.TotalMilliseconds); + } + return Expire(key, (int)expireIn.TotalSeconds); } public bool ExpireEntryAt(string key, DateTime expireAt) { - return ExpireAt(key, expireAt.ToUnixTime()); + if (AssertServerVersionNumber() >= 2600) + { + return PExpireAt(key, ConvertToServerDate(expireAt).ToUnixTimeMs()); + } + else + { + return ExpireAt(key, ConvertToServerDate(expireAt).ToUnixTime()); + } } - public TimeSpan GetTimeToLive(string key) + public TimeSpan? GetTimeToLive(string key) + => ParseTimeToLiveResult(Ttl(key)); + + private static TimeSpan? 
ParseTimeToLiveResult(long ttlSecs) { - return TimeSpan.FromSeconds(Ttl(key)); + if (ttlSecs == -1) + return TimeSpan.MaxValue; //no expiry set + + if (ttlSecs == -2) + return null; //key does not exist + + return TimeSpan.FromSeconds(ttlSecs); } - [Obsolete("Renamed to 'As'")] - public IRedisTypedClient GetTypedClient() + public void RemoveExpiredEntries() { - return new RedisTypedClient(this); + //Redis automatically removed expired Cache Entries } public IRedisTypedClient As() { - return new RedisTypedClient(this); + try + { + var typedClient = new RedisTypedClient(this); + LicenseUtils.AssertValidUsage(LicenseFeature.Redis, QuotaType.Types, __uniqueTypes.Count); + return typedClient; + } + catch (TypeInitializationException ex) + { + throw ex.GetInnerMostException(); + } } public IDisposable AcquireLock(string key) @@ -288,7 +421,14 @@ public IDisposable AcquireLock(string key, TimeSpan timeOut) public IRedisTransaction CreateTransaction() { - return new RedisTransaction(this); + AssertServerVersionNumber(); // pre-fetch call to INFO before transaction if needed + return new RedisTransaction(this, false); + } + + public void AssertNotInTransaction() + { + if (Transaction != null || Pipeline != null) + throw new NotSupportedException("Only atomic redis-server operations are supported in a transaction"); } public IRedisPipeline CreatePipeline() @@ -298,18 +438,20 @@ public IRedisPipeline CreatePipeline() public List SearchKeys(string pattern) { - var multiDataList = Keys(pattern); - return multiDataList.ToStringList(); + var multiDataList = ScanAllKeys(pattern); + return multiDataList.ToList(); } public List GetValues(List keys) { - if (keys == null) throw new ArgumentNullException("keys"); + if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Count == 0) return new List(); - var resultBytesArray = MGet(keys.ToArray()); - - var results = new List(); + return ParseGetValuesResult(MGet(keys.ToArray())); + } + private static List ParseGetValuesResult(byte[][] resultBytesArray) + { + var results = new List(resultBytesArray.Length); foreach (var resultBytes in resultBytesArray) { if (resultBytes == null) continue; @@ -323,12 +465,15 @@ public List GetValues(List keys) public List GetValues(List keys) { - if (keys == null) throw new ArgumentNullException("keys"); + if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Count == 0) return new List(); - var resultBytesArray = MGet(keys.ToArray()); + return ParseGetValuesResult(MGet(keys.ToArray())); + } - var results = new List(); + private static List ParseGetValuesResult(byte[][] resultBytesArray) + { + var results = new List(resultBytesArray.Length); foreach (var resultBytes in resultBytesArray) { if (resultBytes == null) continue; @@ -343,12 +488,17 @@ public List GetValues(List keys) public Dictionary GetValuesMap(List keys) { - if (keys == null) throw new ArgumentNullException("keys"); + if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Count == 0) return new Dictionary(); var keysArray = keys.ToArray(); var resultBytesArray = MGet(keysArray); + return ParseGetValuesMapResult(keysArray, resultBytesArray); + } + + private static Dictionary ParseGetValuesMapResult(string[] keysArray, byte[][] resultBytesArray) + { var results = new Dictionary(); for (var i = 0; i < resultBytesArray.Length; i++) { @@ -371,12 +521,17 @@ public Dictionary GetValuesMap(List keys) public Dictionary GetValuesMap(List keys) { - if (keys == null) throw new ArgumentNullException("keys"); + if (keys == 
null) throw new ArgumentNullException(nameof(keys)); if (keys.Count == 0) return new Dictionary(); var keysArray = keys.ToArray(); var resultBytesArray = MGet(keysArray); + return ParseGetValuesMapResult(keysArray, resultBytesArray); + } + + private static Dictionary ParseGetValuesMapResult(string[] keysArray, byte[][] resultBytesArray) + { var results = new Dictionary(); for (var i = 0; i < resultBytesArray.Length; i++) { @@ -398,25 +553,23 @@ public Dictionary GetValuesMap(List keys) return results; } - public IRedisSubscription CreateSubscription() + public override IRedisSubscription CreateSubscription() { return new RedisSubscription(this); } - public int PublishMessage(string toChannel, string message) + public long PublishMessage(string toChannel, string message) { return base.Publish(toChannel, message.ToUtf8Bytes()); } #region IBasicPersistenceProvider - Dictionary> registeredTypeIdsWithinPipelineMap = new Dictionary>(); internal HashSet GetRegisteredTypeIdsWithinPipeline(string typeIdsSet) { - HashSet registeredTypeIdsWithinPipeline; - if (!registeredTypeIdsWithinPipelineMap.TryGetValue(typeIdsSet, out registeredTypeIdsWithinPipeline)) + if (!registeredTypeIdsWithinPipelineMap.TryGetValue(typeIdsSet, out var registeredTypeIdsWithinPipeline)) { registeredTypeIdsWithinPipeline = new HashSet(); registeredTypeIdsWithinPipelineMap[typeIdsSet] = registeredTypeIdsWithinPipeline; @@ -448,7 +601,7 @@ internal void RegisterTypeId(string typeIdsSetKey, string id) internal void RegisterTypeIds(IEnumerable values) { var typeIdsSetKey = GetTypeIdsSetKey(); - var ids = values.ConvertAll(x => x.GetId().ToString()); + var ids = values.Map(x => x.GetId().ToString()); if (this.Pipeline != null) { @@ -461,44 +614,51 @@ internal void RegisterTypeIds(IEnumerable values) } } - internal void RemoveTypeIds(params string[] ids) + internal void RemoveTypeIdsById(string id) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + if (this.Pipeline != null) + GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey).Remove(id); + else + this.RemoveItemFromSet(typeIdsSetKey, id); + } + + internal void RemoveTypeIdsByIds(IEnumerable ids) { var typeIdsSetKey = GetTypeIdsSetKey(); if (this.Pipeline != null) { var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); - ids.ForEach(x => registeredTypeIdsWithinPipeline.Remove(x)); + ids.Each(x => registeredTypeIdsWithinPipeline.Remove(x)); } else { - ids.ForEach(x => this.RemoveItemFromSet(typeIdsSetKey, x)); + ids.Each(x => this.RemoveItemFromSet(typeIdsSetKey, x)); } } - internal void RemoveTypeIds(params T[] values) + internal void RemoveTypeIdsByValue(T value) => RemoveTypeIdsById(value.GetId().ToString()); + + internal void RemoveTypeIdsByValues(IEnumerable values) { var typeIdsSetKey = GetTypeIdsSetKey(); if (this.Pipeline != null) { var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); - values.ForEach(x => registeredTypeIdsWithinPipeline.Remove(x.GetId().ToString())); + values.Each(x => registeredTypeIdsWithinPipeline.Remove(x.GetId().ToString())); } else { - values.ForEach(x => this.RemoveItemFromSet(typeIdsSetKey, x.GetId().ToString())); + values.Each(x => this.RemoveItemFromSet(typeIdsSetKey, x.GetId().ToString())); } } + // Called just after original Pipeline is closed. 
internal void AddTypeIdsRegisteredDuringPipeline() { foreach (var entry in registeredTypeIdsWithinPipelineMap) { - var typeIdsSetKey = entry.Key; - foreach (var id in entry.Value) - { - var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); - registeredTypeIdsWithinPipeline.ForEach(x => this.AddItemToSet(typeIdsSetKey, id)); - } + AddRangeToSet(entry.Key, entry.Value.ToList()); } registeredTypeIdsWithinPipelineMap = new Dictionary>(); } @@ -508,8 +668,7 @@ internal void ClearTypeIdsRegisteredDuringPipeline() registeredTypeIdsWithinPipelineMap = new Dictionary>(); } - - public T GetById(object id) where T : class, new() + public T GetById(object id) { var key = UrnKey(id); var valueString = this.GetValue(key); @@ -518,31 +677,28 @@ internal void ClearTypeIdsRegisteredDuringPipeline() } public IList GetByIds(ICollection ids) - where T : class, new() { if (ids == null || ids.Count == 0) return new List(); - var urnKeys = ids.ConvertAll(x => UrnKey(x)); + var urnKeys = ids.Cast().Map(UrnKey); return GetValues(urnKeys); } public IList GetAll() - where T : class, new() { var typeIdsSetKy = this.GetTypeIdsSetKey(); var allTypeIds = this.GetAllItemsFromSet(typeIdsSetKy); - var urnKeys = allTypeIds.ConvertAll(x => UrnKey(x)); + var urnKeys = allTypeIds.Cast().Map(UrnKey); return GetValues(urnKeys); } public T Store(T entity) - where T : class, new() { var urnKey = UrnKey(entity); var valueString = JsonSerializer.SerializeToString(entity); - this.SetEntry(urnKey, valueString); + this.SetValue(urnKey, valueString); RegisterTypeId(entity); return entity; @@ -550,14 +706,14 @@ public T Store(T entity) public object StoreObject(object entity) { - if (entity == null) throw new ArgumentNullException("entity"); + if (entity == null) throw new ArgumentNullException(nameof(entity)); var id = entity.GetObjectId(); var entityType = entity.GetType(); var urnKey = UrnKey(entityType, id); var valueString = JsonSerializer.SerializeToString(entity); - this.SetEntry(urnKey, valueString); + this.SetValue(urnKey, valueString); RegisterTypeId(GetTypeIdsSetKey(entityType), id.ToString()); @@ -565,7 +721,6 @@ public object StoreObject(object entity) } public void StoreAll(IEnumerable entities) - where TEntity : class, new() { _StoreAll(entities); } @@ -573,56 +728,87 @@ public void StoreAll(IEnumerable entities) public T GetFromHash(object id) { var key = UrnKey(id); - return - GetAllEntriesFromHash(key).ToJson().FromJson(); + return GetAllEntriesFromHash(key).ToJson().FromJson(); } + /// + /// Store object fields as a dictionary of values in a Hash value. 
+ /// Conversion to Dictionary can be customized with RedisClient.ConvertToHashFn + /// public void StoreAsHash(T entity) { var key = UrnKey(entity); - SetRangeInHash(key, entity.ToJson().FromJson>()); + var hash = ConvertToHashFn(entity); + SetRangeInHash(key, hash); RegisterTypeId(entity); } //Without the Generic Constraints internal void _StoreAll(IEnumerable entities) { - if (entities == null) return; + if (PrepareStoreAll(entities, out var keys, out var values, out var entitiesList)) + { + base.MSet(keys, values); + RegisterTypeIds(entitiesList); + } + } - var entitiesList = entities.ToList(); + private bool PrepareStoreAll(IEnumerable entities, out byte[][] keys, out byte[][] values, out List entitiesList) + { + if (entities == null) + { + entitiesList = default; + keys = values = default; + return false; + } + + entitiesList = entities.ToList(); var len = entitiesList.Count; - if (len == 0) return; + if (len == 0) + { + keys = values = default; + return false; + } - var keys = new byte[len][]; - var values = new byte[len][]; + keys = new byte[len][]; + values = new byte[len][]; for (var i = 0; i < len; i++) { keys[i] = UrnKey(entitiesList[i]).ToUtf8Bytes(); values[i] = SerializeToUtf8Bytes(entitiesList[i]); } - - base.MSet(keys, values); - RegisterTypeIds(entitiesList); + return true; } public void WriteAll(IEnumerable entities) { - if (entities == null) return; + if (PrepareWriteAll(entities, out var keys, out var values)) + { + base.MSet(keys, values); + } + } + + private bool PrepareWriteAll(IEnumerable entities, out byte[][] keys, out byte[][] values) + { + if (entities == null) + { + keys = values = default; + return false; + } var entitiesList = entities.ToList(); var len = entitiesList.Count; - var keys = new byte[len][]; - var values = new byte[len][]; + keys = new byte[len][]; + values = new byte[len][]; for (var i = 0; i < len; i++) { keys[i] = UrnKey(entitiesList[i]).ToUtf8Bytes(); values[i] = SerializeToUtf8Bytes(entitiesList[i]); } - - base.MSet(keys, values); + return true; } public static byte[] SerializeToUtf8Bytes(T value) @@ -631,47 +817,62 @@ public static byte[] SerializeToUtf8Bytes(T value) } public void Delete(T entity) - where T : class, new() { var urnKey = UrnKey(entity); this.Remove(urnKey); - this.RemoveTypeIds(entity); + this.RemoveTypeIdsByValue(entity); } - public void DeleteById(object id) where T : class, new() + public void DeleteById(object id) { var urnKey = UrnKey(id); this.Remove(urnKey); - this.RemoveTypeIds(id.ToString()); + this.RemoveTypeIdsById(id.ToString()); } - public void DeleteByIds(ICollection ids) where T : class, new() + public void DeleteByIds(ICollection ids) { if (ids == null || ids.Count == 0) return; - var urnKeys = ids.ConvertAll(UrnKey); - this.RemoveEntry(urnKeys.ToArray()); - this.RemoveTypeIds(ids.ConvertAll(x => x.ToString()).ToArray()); + var idsList = ids.Cast(); + var urnKeys = idsList.Select(UrnKey).ToArray(); + this.RemoveEntry(urnKeys); + this.RemoveTypeIdsByIds(ids.Map(x => x.ToString()).ToArray()); + } + + public void DeleteAll() + { + DeleteAll(0,RedisConfig.CommandKeysBatchSize); } - public void DeleteAll() where T : class, new() + private void DeleteAll(ulong cursor, int batchSize) { var typeIdsSetKey = this.GetTypeIdsSetKey(); - var ids = this.GetAllItemsFromSet(typeIdsSetKey); - if (ids.Count > 0) + do { - var urnKeys = ids.ConvertAll(UrnKey); - this.RemoveEntry(urnKeys.ToArray()); - this.Remove(typeIdsSetKey); - } + var scanResult = this.SScan(typeIdsSetKey, cursor, batchSize); + cursor = scanResult.Cursor; + 
var urnKeys = scanResult.Results.Select(id => UrnKey(id.FromUtf8Bytes())).ToArray(); + if (urnKeys.Length > 0) + { + this.RemoveEntry(urnKeys); + } + } while (cursor != 0); + + this.RemoveEntry(typeIdsSetKey); } + public RedisClient CloneClient() => new(Host, Port, Password, Db) { + SendTimeout = SendTimeout, + ReceiveTimeout = ReceiveTimeout + }; + /// /// Returns key with automatic object id detection in provided value with generic type. /// /// /// - internal string UrnKey(T value) + public string UrnKey(T value) { return string.Concat(NamespacePrefix, value.CreateUrn()); } @@ -681,7 +882,7 @@ internal string UrnKey(T value) /// /// /// - internal string UrnKey(object id) + public string UrnKey(object id) { return string.Concat(NamespacePrefix, IdUtils.CreateUrn(id)); } @@ -692,7 +893,7 @@ internal string UrnKey(object id) /// /// /// - internal string UrnKey(Type type, object id) + public string UrnKey(Type type, object id) { return string.Concat(NamespacePrefix, IdUtils.CreateUrn(type, id)); } @@ -702,22 +903,67 @@ internal string UrnKey(Type type, object id) #region LUA EVAL - public int ExecLuaAsInt(string body, params string[] args) + static readonly ConcurrentDictionary CachedLuaSha1Map = new(); + + public T ExecCachedLua(string scriptBody, Func scriptSha1) + { + if (!CachedLuaSha1Map.TryGetValue(scriptBody, out var sha1)) + CachedLuaSha1Map[scriptBody] = sha1 = LoadLuaScript(scriptBody); + + try + { + return scriptSha1(sha1); + } + catch (RedisResponseException ex) + { + if (!ex.Message.StartsWith("NOSCRIPT")) + throw; + + CachedLuaSha1Map[scriptBody] = sha1 = LoadLuaScript(scriptBody); + return scriptSha1(sha1); + } + } + + public RedisText ExecLua(string body, params string[] args) + { + var data = base.EvalCommand(body, 0, args.ToMultiByteArray()); + return data.ToRedisText(); + } + + public RedisText ExecLua(string luaBody, string[] keys, string[] args) + { + var data = base.EvalCommand(luaBody, keys.Length, MergeAndConvertToBytes(keys, args)); + return data.ToRedisText(); + } + + public RedisText ExecLuaSha(string sha1, params string[] args) + { + var data = base.EvalShaCommand(sha1, 0, args.ToMultiByteArray()); + return data.ToRedisText(); + } + + public RedisText ExecLuaSha(string sha1, string[] keys, string[] args) + { + var data = base.EvalShaCommand(sha1, keys.Length, MergeAndConvertToBytes(keys, args)); + return data.ToRedisText(); + } + + public long ExecLuaAsInt(string body, params string[] args) { return base.EvalInt(body, 0, args.ToMultiByteArray()); } - public int ExecLuaAsInt(string luaBody, string[] keys, string[] args) + public long ExecLuaAsInt(string luaBody, string[] keys, string[] args) { return base.EvalInt(luaBody, keys.Length, MergeAndConvertToBytes(keys, args)); } - public int ExecLuaShaAsInt(string sha1, params string[] args) + public long ExecLuaShaAsInt(string sha1, params string[] args) { - return base.EvalShaInt(sha1, args.Length, args.ToMultiByteArray()); + return base.EvalShaInt(sha1, 0, args.ToMultiByteArray()); } - public int ExecLuaShaAsInt(string sha1, string[] keys, string[] args) + public long ExecLuaShaAsInt(string sha1, string[] keys, string[] args) { return base.EvalShaInt(sha1, keys.Length, MergeAndConvertToBytes(keys, args)); } @@ -771,6 +1017,10 @@ public bool HasLuaScript(string sha1Ref) public Dictionary WhichLuaScriptsExists(params string[] sha1Refs) { var intFlags = base.ScriptExists(sha1Refs.ToMultiByteArray()); + return WhichLuaScriptsExistsParseResult(sha1Refs, intFlags); + } + static Dictionary 
WhichLuaScriptsExistsParseResult(string[] sha1Refs, byte[][] intFlags) + { var map = new Dictionary(); for (int i = 0; i < sha1Refs.Length; i++) { @@ -796,6 +1046,163 @@ public string LoadLuaScript(string body) } #endregion + + public void RemoveByPattern(string pattern) + { + var keys = ScanAllKeys(pattern).ToArray(); + if (keys.Length > 0) + Del(keys); + } + + public void RemoveByRegex(string pattern) + { + RemoveByPattern(RegexToGlob(pattern)); + } + + private static string RegexToGlob(string regex) + => regex.Replace(".*", "*").Replace(".+", "?"); + + public IEnumerable ScanAllKeys(string pattern = null, int pageSize = 1000) + { + var ret = new ScanResult(); + while (true) + { + ret = pattern != null + ? base.Scan(ret.Cursor, pageSize, match: pattern) + : base.Scan(ret.Cursor, pageSize); + + foreach (var key in ret.Results) + { + yield return key.FromUtf8Bytes(); + } + + if (ret.Cursor == 0) break; + } + } + + public IEnumerable ScanAllSetItems(string setId, string pattern = null, int pageSize = 1000) + { + var ret = new ScanResult(); + while (true) + { + ret = pattern != null + ? base.SScan(setId, ret.Cursor, pageSize, match: pattern) + : base.SScan(setId, ret.Cursor, pageSize); + + foreach (var key in ret.Results) + { + yield return key.FromUtf8Bytes(); + } + + if (ret.Cursor == 0) break; + } + } + + public IEnumerable> ScanAllSortedSetItems(string setId, string pattern = null, int pageSize = 1000) + { + var ret = new ScanResult(); + while (true) + { + ret = pattern != null + ? base.ZScan(setId, ret.Cursor, pageSize, match: pattern) + : base.ZScan(setId, ret.Cursor, pageSize); + + foreach (var entry in ret.AsItemsWithScores()) + { + yield return entry; + } + + if (ret.Cursor == 0) break; + } + } + + public IEnumerable> ScanAllHashEntries(string hashId, string pattern = null, int pageSize = 1000) + { + var ret = new ScanResult(); + while (true) + { + ret = pattern != null + ? 
base.HScan(hashId, ret.Cursor, pageSize, match: pattern) + : base.HScan(hashId, ret.Cursor, pageSize); + + foreach (var entry in ret.AsKeyValues()) + { + yield return entry; + } + + if (ret.Cursor == 0) break; + } + } + + public bool AddToHyperLog(string key, params string[] elements) + { + return base.PfAdd(key, elements.Map(x => x.ToUtf8Bytes()).ToArray()); + } + + public long CountHyperLog(string key) + { + return base.PfCount(key); + } + + public void MergeHyperLogs(string toKey, params string[] fromKeys) + { + base.PfMerge(toKey, fromKeys); + } + + public RedisServerRole GetServerRole() + { + if (AssertServerVersionNumber() >= 2812) + { + var text = base.Role(); + var roleName = text.Children[0].Text; + return ToServerRole(roleName); + } + + this.Info.TryGetValue("role", out var role); + return ToServerRole(role); + } + + private static RedisServerRole ToServerRole(string roleName) + { + if (string.IsNullOrEmpty(roleName)) + return RedisServerRole.Unknown; + + switch (roleName) + { + case "master": + return RedisServerRole.Master; + case "slave": + return RedisServerRole.Slave; + case "sentinel": + return RedisServerRole.Sentinel; + default: + return RedisServerRole.Unknown; + } + } + + internal RedisClient LimitAccessToThread(int originalThreadId, string originalStackTrace) + { + TrackThread = new TrackThread(originalThreadId, originalStackTrace); + return this; + } + } + + internal struct TrackThread + { + public readonly int ThreadId; + public readonly string StackTrace; + + public TrackThread(int threadId, string stackTrace) + { + ThreadId = threadId; + StackTrace = stackTrace; + } + } + + public class InvalidAccessException : RedisException + { + public InvalidAccessException(int threadId, string stackTrace) + : base($"The Current Thread #{Thread.CurrentThread.ManagedThreadId} is different to the original Thread #{threadId} that resolved this pooled client at: \n{stackTrace}") { } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientExtensions.cs b/src/ServiceStack.Redis/RedisClientExtensions.cs new file mode 100644 index 00000000..79e655ed --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientExtensions.cs @@ -0,0 +1,15 @@ +namespace ServiceStack.Redis +{ + public static partial class RedisClientExtensions + { + public static string GetHostString(this IRedisClient redis) + { + return "{0}:{1}".Fmt(redis.Host, redis.Port); + } + + public static string GetHostString(this RedisEndpoint config) + { + return "{0}:{1}".Fmt(config.Host, config.Port); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientFactory.cs b/src/ServiceStack.Redis/RedisClientFactory.cs deleted file mode 100644 index 85f8ef39..00000000 --- a/src/ServiceStack.Redis/RedisClientFactory.cs +++ /dev/null @@ -1,30 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
-// - -using System.Net; - -namespace ServiceStack.Redis -{ - /// - /// Provide the default factory implementation for creating a RedisClient that - /// can be mocked and used by different 'Redis Client Managers' - /// - public class RedisClientFactory : IRedisClientFactory - { - public static RedisClientFactory Instance = new RedisClientFactory(); - - public RedisClient CreateRedisClient(string host, int port) - { - return new RedisClient(host, port); - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientHash.Async.cs b/src/ServiceStack.Redis/RedisClientHash.Async.cs new file mode 100644 index 00000000..c1bab49f --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientHash.Async.cs @@ -0,0 +1,55 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal partial class RedisClientHash + : IRedisHashAsync + { + private IRedisClientAsync AsyncClient => client; + + ValueTask IRedisHashAsync.AddAsync(KeyValuePair item, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(hashId, item.Key, item.Value, token).Await(); + + ValueTask IRedisHashAsync.AddAsync(string key, string value, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(hashId, key, value, token).Await(); + + ValueTask IRedisHashAsync.AddIfNotExistsAsync(KeyValuePair item, CancellationToken token) + => AsyncClient.SetEntryInHashIfNotExistsAsync(hashId, item.Key, item.Value, token); + + ValueTask IRedisHashAsync.AddRangeAsync(IEnumerable> items, CancellationToken token) + => AsyncClient.SetRangeInHashAsync(hashId, items, token); + + ValueTask IRedisHashAsync.ClearAsync(CancellationToken token) + => new ValueTask(AsyncClient.RemoveAsync(hashId, token)); + + ValueTask IRedisHashAsync.ContainsKeyAsync(string key, CancellationToken token) + => AsyncClient.HashContainsEntryAsync(hashId, key, token); + + ValueTask IRedisHashAsync.CountAsync(CancellationToken token) + => AsyncClient.GetHashCountAsync(hashId, token).AsInt32(); + + IAsyncEnumerator> IAsyncEnumerable>.GetAsyncEnumerator(CancellationToken token) + => AsyncClient.ScanAllHashEntriesAsync(hashId).GetAsyncEnumerator(token); // note: we're using HSCAN here, not HGETALL + + ValueTask IRedisHashAsync.IncrementValueAsync(string key, int incrementBy, CancellationToken token) + => AsyncClient.IncrementValueInHashAsync(hashId, key, incrementBy, token); + + ValueTask IRedisHashAsync.RemoveAsync(string key, CancellationToken token) + => AsyncClient.RemoveEntryFromHashAsync(hashId, key, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientHash.cs b/src/ServiceStack.Redis/RedisClientHash.cs index bfba4ec3..5aad5911 100644 --- a/src/ServiceStack.Redis/RedisClientHash.cs +++ b/src/ServiceStack.Redis/RedisClientHash.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. 
+// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -20,7 +20,7 @@ namespace ServiceStack.Redis /// /// Wrap the common redis set operations under a ICollection[string] interface. /// - internal class RedisClientHash + internal partial class RedisClientHash : IRedisHash { private readonly RedisClient client; @@ -57,7 +57,7 @@ public void AddRange(IEnumerable> items) client.SetRangeInHash(hashId, items); } - public int IncrementValue(string key, int incrementBy) + public long IncrementValue(string key, int incrementBy) { return client.IncrementValueInHash(hashId, key, incrementBy); } @@ -97,7 +97,7 @@ public bool Remove(KeyValuePair item) public int Count { - get { return client.GetHashCount(hashId); } + get { return (int)client.GetHashCount(hashId); } } public bool IsReadOnly diff --git a/src/ServiceStack.Redis/RedisClientList.Async.cs b/src/ServiceStack.Redis/RedisClientList.Async.cs new file mode 100644 index 00000000..a995081b --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientList.Async.cs @@ -0,0 +1,161 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal partial class RedisClientList + : IRedisListAsync + { + private IRedisClientAsync AsyncClient => client; + private IRedisListAsync AsAsync() => this; + + ValueTask IRedisListAsync.AppendAsync(string value, CancellationToken token) + => AsyncClient.AddItemToListAsync(listId, value, token); + + ValueTask IRedisListAsync.BlockingDequeueAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingDequeueItemFromListAsync(listId, timeOut, token); + + ValueTask IRedisListAsync.BlockingPopAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingPopItemFromListAsync(listId, timeOut, token); + + ValueTask IRedisListAsync.BlockingRemoveStartAsync(TimeSpan? 
timeOut, CancellationToken token) + => AsyncClient.BlockingRemoveStartFromListAsync(listId, timeOut, token); + + ValueTask IRedisListAsync.CountAsync(CancellationToken token) + => AsyncClient.GetListCountAsync(listId, token).AsInt32(); + + ValueTask IRedisListAsync.DequeueAsync(CancellationToken token) + => AsyncClient.DequeueItemFromListAsync(listId, token); + + ValueTask IRedisListAsync.EnqueueAsync(string value, CancellationToken token) + => AsyncClient.EnqueueItemOnListAsync(listId, value, token); + + ValueTask> IRedisListAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromListAsync(listId, token); + + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + var count = await AsAsync().CountAsync(token).ConfigureAwait(false); + if (count <= PageLimit) + { + var all = await AsyncClient.GetAllItemsFromListAsync(listId, token).ConfigureAwait(false); + foreach (var item in all) + { + yield return item; + } + } + else + { + // from GetPagingEnumerator() + var skip = 0; + List pageResults; + do + { + pageResults = await AsyncClient.GetRangeFromListAsync(listId, skip, skip + PageLimit - 1, token).ConfigureAwait(false); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + } + + ValueTask> IRedisListAsync.GetRangeAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetRangeFromListAsync(listId, startingFrom, endingAt, token); + + ValueTask> IRedisListAsync.GetRangeFromSortedListAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetRangeFromSortedListAsync(listId, startingFrom, endingAt, token); + + ValueTask IRedisListAsync.PopAndPushAsync(IRedisListAsync toList, CancellationToken token) + => AsyncClient.PopAndPushItemBetweenListsAsync(listId, toList.Id, token); + + ValueTask IRedisListAsync.PopAsync(CancellationToken token) + => AsyncClient.PopItemFromListAsync(listId, token); + + ValueTask IRedisListAsync.PrependAsync(string value, CancellationToken token) + => AsyncClient.PrependItemToListAsync(listId, value, token); + + ValueTask IRedisListAsync.PushAsync(string value, CancellationToken token) + => AsyncClient.PushItemToListAsync(listId, value, token); + + ValueTask IRedisListAsync.RemoveAllAsync(CancellationToken token) + => AsyncClient.RemoveAllFromListAsync(listId, token); + + ValueTask IRedisListAsync.RemoveEndAsync(CancellationToken token) + => AsyncClient.RemoveEndFromListAsync(listId, token); + + ValueTask IRedisListAsync.RemoveStartAsync(CancellationToken token) + => AsyncClient.RemoveStartFromListAsync(listId, token); + + ValueTask IRedisListAsync.RemoveValueAsync(string value, CancellationToken token) + => AsyncClient.RemoveItemFromListAsync(listId, value, token); + + ValueTask IRedisListAsync.RemoveValueAsync(string value, int noOfMatches, CancellationToken token) + => AsyncClient.RemoveItemFromListAsync(listId, value, noOfMatches, token); + + ValueTask IRedisListAsync.TrimAsync(int keepStartingFrom, int keepEndingAt, CancellationToken token) + => AsyncClient.TrimListAsync(listId, keepStartingFrom, keepEndingAt, token); + + async ValueTask IRedisListAsync.RemoveAsync(string value, CancellationToken token) + => (await AsyncClient.RemoveItemFromListAsync(listId, value, token).ConfigureAwait(false)) > 0; + + ValueTask IRedisListAsync.AddAsync(string value, CancellationToken token) + => AsyncClient.AddItemToListAsync(listId, value, token); + + async ValueTask 
IRedisListAsync.RemoveAtAsync(int index, CancellationToken token) + { + //TODO: replace with native implementation when one exists + var markForDelete = Guid.NewGuid().ToString(); + await AsyncClient.SetItemInListAsync(listId, index, markForDelete, token).ConfigureAwait(false); + await AsyncClient.RemoveItemFromListAsync(listId, markForDelete, token).ConfigureAwait(false); + } + + async ValueTask IRedisListAsync.ContainsAsync(string value, CancellationToken token) + { + //TODO: replace with native implementation when exists + await foreach (var existingItem in this.ConfigureAwait(false).WithCancellation(token)) + { + if (existingItem == value) return true; + } + return false; + } + + ValueTask IRedisListAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveAllFromListAsync(listId, token); + + async ValueTask IRedisListAsync.IndexOfAsync(string value, CancellationToken token) + { + //TODO: replace with native implementation when exists + var i = 0; + await foreach (var existingItem in this.ConfigureAwait(false).WithCancellation(token)) + { + if (existingItem == value) return i; + i++; + } + return -1; + } + + ValueTask IRedisListAsync.ElementAtAsync(int index, CancellationToken token) + => AsyncClient.GetItemFromListAsync(listId, index, token); + + ValueTask IRedisListAsync.SetValueAsync(int index, string value, CancellationToken token) + => AsyncClient.SetItemInListAsync(listId, index, value, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientList.cs b/src/ServiceStack.Redis/RedisClientList.cs index 8f77a250..1777723e 100644 --- a/src/ServiceStack.Redis/RedisClientList.cs +++ b/src/ServiceStack.Redis/RedisClientList.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -19,7 +19,7 @@ namespace ServiceStack.Redis /// /// Wrap the common redis list operations under a IList[string] interface. 
/// - internal class RedisClientList + internal partial class RedisClientList : IRedisList { private readonly RedisClient client; @@ -99,7 +99,7 @@ public int Count { get { - return client.GetListCount(listId); + return (int)client.GetListCount(listId); } } @@ -163,12 +163,12 @@ public void Trim(int keepStartingFrom, int keepEndingAt) client.TrimList(listId, keepStartingFrom, keepEndingAt); } - public int RemoveValue(string value) + public long RemoveValue(string value) { return client.RemoveItemFromList(listId, value); } - public int RemoveValue(string value, int noOfMatches) + public long RemoveValue(string value, int noOfMatches) { return client.RemoveItemFromList(listId, value, noOfMatches); } diff --git a/src/ServiceStack.Redis/RedisClientManagerCacheClient.Async.cs b/src/ServiceStack.Redis/RedisClientManagerCacheClient.Async.cs new file mode 100644 index 00000000..f77928b3 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientManagerCacheClient.Async.cs @@ -0,0 +1,165 @@ +using ServiceStack.Caching; +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisClientManagerCacheClient : ICacheClientAsync, IRemoveByPatternAsync, IAsyncDisposable + { + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + private ValueTask GetClientAsync(in CancellationToken token) + { + AssertNotReadOnly(); + return redisManager.GetClientAsync(token); + } + + async Task ICacheClientAsync.GetAsync(string key, CancellationToken token) + { + await using var client = await redisManager.GetReadOnlyClientAsync(token).ConfigureAwait(false); + return await client.GetAsync(key, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.FlushAllAsync(CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + await client.FlushAllAsync(token).ConfigureAwait(false); + } + + async Task> ICacheClientAsync.GetAllAsync(IEnumerable keys, CancellationToken token) + { + await using var client = await redisManager.GetReadOnlyClientAsync(token).ConfigureAwait(false); + return await client.GetAllAsync(keys, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAllAsync(IDictionary values, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + await client.SetAllAsync(values, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.RemoveAsync(string key, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.RemoveAsync(key, token).ConfigureAwait(false); + } + 
+ async Task ICacheClientAsync.GetTimeToLiveAsync(string key, CancellationToken token) + { + await using var client = await redisManager.GetReadOnlyClientAsync(token).ConfigureAwait(false); + return await client.GetTimeToLiveAsync(key, token).ConfigureAwait(false); + } + + async IAsyncEnumerable ICacheClientAsync.GetKeysByPatternAsync(string pattern, [EnumeratorCancellation] CancellationToken token) + { + await using var client = await redisManager.GetReadOnlyClientAsync(token).ConfigureAwait(false); + await foreach (var key in client.GetKeysByPatternAsync(pattern, token).ConfigureAwait(false).WithCancellation(token)) + { + yield return key; + } + } + + Task ICacheClientAsync.RemoveExpiredEntriesAsync(CancellationToken token) + { + //Redis automatically removed expired Cache Entries + return Task.CompletedTask; + } + + async Task IRemoveByPatternAsync.RemoveByPatternAsync(string pattern, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + if (client is IRemoveByPatternAsync redisClient) + { + await redisClient.RemoveByPatternAsync(pattern, token).ConfigureAwait(false); + } + } + + async Task IRemoveByPatternAsync.RemoveByRegexAsync(string regex, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + if (client is IRemoveByPatternAsync redisClient) + { + await redisClient.RemoveByRegexAsync(regex, token).ConfigureAwait(false); + } + } + + async Task ICacheClientAsync.RemoveAllAsync(IEnumerable keys, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + await client.RemoveAllAsync(keys, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.IncrementAsync(string key, uint amount, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.IncrementAsync(key, amount, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.DecrementAsync(string key, uint amount, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.DecrementAsync(key, amount, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, expiresIn, 
token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientManagerCacheClient.cs b/src/ServiceStack.Redis/RedisClientManagerCacheClient.cs index f9b49593..6557bb46 100644 --- a/src/ServiceStack.Redis/RedisClientManagerCacheClient.cs +++ b/src/ServiceStack.Redis/RedisClientManagerCacheClient.cs @@ -1,10 +1,19 @@ using System; using System.Collections.Generic; -using ServiceStack.CacheAccess; +using System.Linq; +using ServiceStack.Caching; namespace ServiceStack.Redis { - public class RedisClientManagerCacheClient : ICacheClient + /// + /// For interoperability GetCacheClient() and GetReadOnlyCacheClient() + /// return an ICacheClient wrapper around the redis manager which has the affect of calling + /// GetClient() for all write operations and GetReadOnlyClient() for the read ones. + /// + /// This works well for master-replica replication scenarios where you have + /// 1 master that replicates to multiple read replicas. + /// + public partial class RedisClientManagerCacheClient : ICacheClient, IRemoveByPattern, ICacheClientExtended { private readonly IRedisClientsManager redisManager; @@ -18,30 +27,26 @@ public RedisClientManagerCacheClient(IRedisClientsManager redisManager) /// /// Ignore dispose on RedisClientsManager, which should be registered as a singleton /// - public void Dispose() {} + public void Dispose() { } public T Get(string key) { - using (var client = redisManager.GetReadOnlyClient()) - { - return client.Get(key); - } + using var client = redisManager.GetReadOnlyClient(); + return client.Get(key); } public IDictionary GetAll(IEnumerable keys) { - using (var client = redisManager.GetReadOnlyClient()) - { - return client.GetAll(keys); - } + using var client = redisManager.GetReadOnlyClient(); + return client.GetAll(keys); } private void AssertNotReadOnly() { - if (this.ReadOnly) + if (this.ReadOnly) throw new InvalidOperationException("Cannot perform write operations on a Read-only client"); } - + public ICacheClient GetClient() { AssertNotReadOnly(); @@ -50,122 +55,132 @@ public ICacheClient GetClient() public bool Remove(string key) { - using (var client = GetClient()) - { - return client.Remove(key); - } + using var client = GetClient(); + return client.Remove(key); } public void RemoveAll(IEnumerable keys) { - using (var client = GetClient()) - { - client.RemoveAll(keys); - } + using var client = GetClient(); + client.RemoveAll(keys); } public long Increment(string key, uint amount) { - using (var client = GetClient()) - { - return client.Increment(key, amount); - } + using var client = GetClient(); + return client.Increment(key, amount); } public long Decrement(string key, uint amount) { - using (var client = GetClient()) - { - return client.Decrement(key, amount); - } + using var client = GetClient(); + return client.Decrement(key, amount); } public bool Add(string key, T value) { - using (var client = GetClient()) - { - return client.Add(key, value); - } + using var client = GetClient(); + return client.Add(key, value); } public bool Set(string key, T value) { - using (var client = GetClient()) - { - return client.Set(key, value); - } + using var client = GetClient(); + return client.Set(key, value); } public bool Replace(string 
key, T value) { - using (var client = GetClient()) - { - return client.Replace(key, value); - } + using var client = GetClient(); + return client.Replace(key, value); } public bool Add(string key, T value, DateTime expiresAt) { - using (var client = GetClient()) - { - return client.Add(key, value, expiresAt); - } + using var client = GetClient(); + return client.Add(key, value, expiresAt); } public bool Set(string key, T value, DateTime expiresAt) { - using (var client = GetClient()) - { - return client.Set(key, value, expiresAt); - } + using var client = GetClient(); + return client.Set(key, value, expiresAt); } public bool Replace(string key, T value, DateTime expiresAt) { - using (var client = GetClient()) - { - return client.Replace(key, value, expiresAt); - } + using var client = GetClient(); + return client.Replace(key, value, expiresAt); } public bool Add(string key, T value, TimeSpan expiresIn) { - using (var client = GetClient()) - { - return client.Set(key, value, expiresIn); - } + using var client = GetClient(); + return client.Set(key, value, expiresIn); } public bool Set(string key, T value, TimeSpan expiresIn) { - using (var client = GetClient()) - { - return client.Set(key, value, expiresIn); - } + using var client = GetClient(); + return client.Set(key, value, expiresIn); } public bool Replace(string key, T value, TimeSpan expiresIn) { - using (var client = GetClient()) + using var client = GetClient(); + return client.Replace(key, value, expiresIn); + } + + public void FlushAll() + { + using var client = GetClient(); + client.FlushAll(); + } + + public void SetAll(IDictionary values) + { + using var client = GetClient(); + client.SetAll(values); + } + + public void RemoveByPattern(string pattern) + { + using var client = GetClient(); + if (client is IRemoveByPattern redisClient) { - return client.Replace(key, value, expiresIn); + redisClient.RemoveByPattern(pattern); } } - public void FlushAll() + public void RemoveByRegex(string pattern) { - using (var client = GetClient()) + using var client = GetClient(); + if (client is IRemoveByPattern redisClient) { - client.FlushAll(); + redisClient.RemoveByRegex(pattern); } } - public void SetAll(IDictionary values) + public TimeSpan? GetTimeToLive(string key) { - using (var client = GetClient()) + using var client = GetClient(); + if (client is ICacheClientExtended redisClient) { - client.SetAll(values); + return redisClient.GetTimeToLive(key); } + + return null; + } + + public IEnumerable GetKeysByPattern(string pattern) + { + using var client = (ICacheClientExtended)GetClient(); + return client.GetKeysByPattern(pattern).ToList(); + } + + public void RemoveExpiredEntries() + { + //Redis automatically removed expired Cache Entries } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientManagerConfig.cs b/src/ServiceStack.Redis/RedisClientManagerConfig.cs index 044b8e4f..e3052805 100644 --- a/src/ServiceStack.Redis/RedisClientManagerConfig.cs +++ b/src/ServiceStack.Redis/RedisClientManagerConfig.cs @@ -1,27 +1,27 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. 
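// Illustrative sketch, not part of the patch: how the manager-backed cache client above is used.
// Every call checks out a client from the IRedisClientsManager and disposes it again - writes go
// through GetClient()/GetClientAsync(), reads through the read-only equivalents. Assumes
// ServiceStack.Redis's PooledRedisClientManager and an ICacheClientAsync.GetAsync<T>() member;
// the host and key names are placeholders.
using System;
using System.Threading.Tasks;
using ServiceStack.Caching;
using ServiceStack.Redis;

class CacheClientWrapperSketch
{
    static async Task Main()
    {
        using var manager = new PooledRedisClientManager("localhost:6379");
        var cache = new RedisClientManagerCacheClient(manager);

        cache.Set("greeting", "hello", TimeSpan.FromMinutes(5)); // write -> GetClient()
        Console.WriteLine(cache.Get<string>("greeting"));        // read  -> GetReadOnlyClient()

        // The .Async partial exposes the same operations through ICacheClientAsync
        ICacheClientAsync asyncCache = cache;
        Console.WriteLine(await asyncCache.GetAsync<string>("greeting"));
    }
}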
// namespace ServiceStack.Redis { - public class RedisClientManagerConfig - { - public RedisClientManagerConfig() - { - AutoStart = true; //Simplifies the most common use-case - registering in an IOC - } + public class RedisClientManagerConfig + { + public RedisClientManagerConfig() + { + AutoStart = true; //Simplifies the most common use-case - registering in an IOC + } - public int? DefaultDb { get; set; } - public int MaxReadPoolSize { get; set; } - public int MaxWritePoolSize { get; set; } - public bool AutoStart { get; set; } - } + public long? DefaultDb { get; set; } + public int MaxReadPoolSize { get; set; } + public int MaxWritePoolSize { get; set; } + public bool AutoStart { get; set; } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientSet.Async.cs b/src/ServiceStack.Redis/RedisClientSet.Async.cs new file mode 100644 index 00000000..b8d2d55f --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientSet.Async.cs @@ -0,0 +1,118 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal partial class RedisClientSet + : IRedisSetAsync + { + private IRedisSetAsync AsAsync() => this; + private IRedisClientAsync AsyncClient => client; + + ValueTask IRedisSetAsync.AddAsync(string item, CancellationToken token) + => AsyncClient.AddItemToSetAsync(setId, item, token); + + ValueTask IRedisSetAsync.ClearAsync(CancellationToken token) + => new ValueTask(AsyncClient.RemoveAsync(setId, token)); + + ValueTask IRedisSetAsync.ContainsAsync(string item, CancellationToken token) + => AsyncClient.SetContainsItemAsync(setId, item, token); + + ValueTask IRedisSetAsync.CountAsync(CancellationToken token) + => AsyncClient.GetSetCountAsync(setId, token).AsInt32(); + + ValueTask> IRedisSetAsync.DiffAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); + return AsyncClient.GetDifferencesFromSetAsync(setId, withSetIds, token); + } + + ValueTask> IRedisSetAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSetAsync(setId, token); + + IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + => AsyncClient.ScanAllSetItemsAsync(setId, token: token).GetAsyncEnumerator(token); // uses SSCAN + + ValueTask IRedisSetAsync.GetRandomEntryAsync(CancellationToken token) + => AsyncClient.GetRandomItemFromSetAsync(setId, token); + + ValueTask> IRedisSetAsync.GetRangeFromSortedSetAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetSortedEntryValuesAsync(setId, startingFrom, endingAt, token); + + ValueTask> IRedisSetAsync.IntersectAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var allSetIds = MergeSetIds(withSets); + return AsyncClient.GetIntersectFromSetsAsync(allSetIds.ToArray(), token); + } + + ValueTask> IRedisSetAsync.IntersectAsync(params IRedisSetAsync[] withSets) + => AsAsync().IntersectAsync(withSets, token: default); + + private List MergeSetIds(IRedisSetAsync[] withSets) + { + var allSetIds = new List { setId }; + 
allSetIds.AddRange(withSets.ToList().ConvertAll(x => x.Id)); + return allSetIds; + } + + ValueTask IRedisSetAsync.MoveAsync(string value, IRedisSetAsync toSet, CancellationToken token) + => AsyncClient.MoveBetweenSetsAsync(setId, toSet.Id, value, token); + + ValueTask IRedisSetAsync.PopAsync(CancellationToken token) + => AsyncClient.PopItemFromSetAsync(setId, token); + + ValueTask IRedisSetAsync.RemoveAsync(string item, CancellationToken token) + => AsyncClient.RemoveItemFromSetAsync(setId, item, token).AwaitAsTrue(); // see Remove for why true + + ValueTask IRedisSetAsync.StoreDiffAsync(IRedisSetAsync fromSet, IRedisSetAsync[] withSets, CancellationToken token) + { + var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); + return AsyncClient.StoreDifferencesFromSetAsync(setId, fromSet.Id, withSetIds, token); + } + + ValueTask IRedisSetAsync.StoreDiffAsync(IRedisSetAsync fromSet, params IRedisSetAsync[] withSets) + => AsAsync().StoreDiffAsync(fromSet, withSets, token: default); + + ValueTask IRedisSetAsync.StoreIntersectAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); + return AsyncClient.StoreIntersectFromSetsAsync(setId, withSetIds, token); + } + + ValueTask IRedisSetAsync.StoreIntersectAsync(params IRedisSetAsync[] withSets) + => AsAsync().StoreIntersectAsync(withSets, token: default); + + ValueTask IRedisSetAsync.StoreUnionAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); + return AsyncClient.StoreUnionFromSetsAsync(setId, withSetIds, token); + } + + ValueTask IRedisSetAsync.StoreUnionAsync(params IRedisSetAsync[] withSets) + => AsAsync().StoreUnionAsync(withSets, token: default); + + ValueTask> IRedisSetAsync.UnionAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var allSetIds = MergeSetIds(withSets); + return AsyncClient.GetUnionFromSetsAsync(allSetIds.ToArray(), token); + } + + ValueTask> IRedisSetAsync.UnionAsync(params IRedisSetAsync[] withSets) + => AsAsync().UnionAsync(withSets, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientSet.cs b/src/ServiceStack.Redis/RedisClientSet.cs index f5128008..daa58653 100644 --- a/src/ServiceStack.Redis/RedisClientSet.cs +++ b/src/ServiceStack.Redis/RedisClientSet.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -20,7 +20,7 @@ namespace ServiceStack.Redis /// /// Wrap the common redis set operations under a ICollection[string] interface. 
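// Illustrative sketch, not part of the patch: consuming the IRedisSetAsync wrapper above. It
// assumes the async client exposes the same named Sets index as the sync RedisClient, and that the
// manager provides GetClientAsync() (the same call pattern the cache-client partial above relies
// on); host and key names are placeholders.
using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

class RedisSetAsyncSketch
{
    static async Task Main()
    {
        using var manager = new RedisManagerPool("localhost:6379");
        await using var client = await manager.GetClientAsync();

        IRedisSetAsync tags = client.Sets["article:1:tags"];
        await tags.AddAsync("redis");
        await tags.AddAsync("caching");

        Console.WriteLine(await tags.ContainsAsync("redis")); // True
        Console.WriteLine(await tags.CountAsync());           // 2

        await foreach (var tag in tags)                       // streamed via SSCAN, see GetAsyncEnumerator above
            Console.WriteLine(tag);
    }
}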
/// - internal class RedisClientSet + internal partial class RedisClientSet : IRedisSet { private readonly RedisClient client; @@ -91,7 +91,7 @@ public int Count { get { - return client.GetSetCount(setId); + return (int)client.GetSetCount(setId); } } diff --git a/src/ServiceStack.Redis/RedisClientSortedSet.Async.cs b/src/ServiceStack.Redis/RedisClientSortedSet.Async.cs new file mode 100644 index 00000000..a7afd56e --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientSortedSet.Async.cs @@ -0,0 +1,102 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal partial class RedisClientSortedSet + : IRedisSortedSetAsync + { + private IRedisClientAsync AsyncClient => client; + + ValueTask IRedisSortedSetAsync.AddAsync(string value, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(setId, value, token).Await(); + + private IRedisSortedSetAsync AsAsync() => this; + + ValueTask IRedisSortedSetAsync.ClearAsync(CancellationToken token) + => new ValueTask(AsyncClient.RemoveAsync(setId, token)); + + ValueTask IRedisSortedSetAsync.ContainsAsync(string value, CancellationToken token) + => AsyncClient.SortedSetContainsItemAsync(setId, value, token); + + ValueTask IRedisSortedSetAsync.CountAsync(CancellationToken token) + => AsyncClient.GetSortedSetCountAsync(setId, token).AsInt32(); + + ValueTask> IRedisSortedSetAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetAsync(setId, token); + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + // uses ZSCAN + await foreach (var pair in AsyncClient.ScanAllSortedSetItemsAsync(setId, token: token).ConfigureAwait(false)) + { + yield return pair.Key; + } + } + + ValueTask IRedisSortedSetAsync.GetItemIndexAsync(string value, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetAsync(setId, value, token); + + ValueTask IRedisSortedSetAsync.GetItemScoreAsync(string value, CancellationToken token) + => AsyncClient.GetItemScoreInSortedSetAsync(setId, value, token); + + ValueTask> IRedisSortedSetAsync.GetRangeAsync(int startingRank, int endingRank, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetAsync(setId, startingRank, endingRank, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByScoreAsync(string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeByScoreAsync(fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByScoreAsync(string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(setId, fromStringScore, toStringScore, skip, take, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeByScoreAsync(fromScore, toScore, null, null, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByScoreAsync(double fromScore, double toScore, int? skip, int? 
take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, skip, take, token); + + ValueTask IRedisSortedSetAsync.IncrementItemScoreAsync(string value, double incrementByScore, CancellationToken token) + => AsyncClient.IncrementItemInSortedSetAsync(setId, value, incrementByScore, token).Await(); + + ValueTask IRedisSortedSetAsync.PopItemWithHighestScoreAsync(CancellationToken token) + => AsyncClient.PopItemWithHighestScoreFromSortedSetAsync(setId, token); + + ValueTask IRedisSortedSetAsync.PopItemWithLowestScoreAsync(CancellationToken token) + => AsyncClient.PopItemWithLowestScoreFromSortedSetAsync(setId, token); + + ValueTask IRedisSortedSetAsync.RemoveAsync(string value, CancellationToken token) + => AsyncClient.RemoveItemFromSortedSetAsync(setId, value, token).AwaitAsTrue(); // see Remove() for why "true" + + ValueTask IRedisSortedSetAsync.RemoveRangeAsync(int fromRank, int toRank, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetAsync(setId, fromRank, toRank, token).Await(); + + ValueTask IRedisSortedSetAsync.RemoveRangeByScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetByScoreAsync(setId, fromScore, toScore, token).Await(); + + ValueTask IRedisSortedSetAsync.StoreFromIntersectAsync(IRedisSortedSetAsync[] ofSets, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(setId, ofSets.GetIds(), token).Await(); + + ValueTask IRedisSortedSetAsync.StoreFromIntersectAsync(params IRedisSortedSetAsync[] ofSets) + => AsAsync().StoreFromIntersectAsync(ofSets, token: default); + + ValueTask IRedisSortedSetAsync.StoreFromUnionAsync(IRedisSortedSetAsync[] ofSets, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(setId, ofSets.GetIds(), token).Await(); + + ValueTask IRedisSortedSetAsync.StoreFromUnionAsync(params IRedisSortedSetAsync[] ofSets) + => AsAsync().StoreFromUnionAsync(ofSets, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientSortedSet.cs b/src/ServiceStack.Redis/RedisClientSortedSet.cs index c55d9d29..adde5d12 100644 --- a/src/ServiceStack.Redis/RedisClientSortedSet.cs +++ b/src/ServiceStack.Redis/RedisClientSortedSet.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; @@ -20,7 +20,7 @@ namespace ServiceStack.Redis /// /// Wrap the common redis set operations under a ICollection[string] interface. 
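// Illustrative note, not part of the patch: ICollection<string>.Count forces the wrappers above to
// down-cast the 64-bit ZCARD/SCARD reply, so sets that could exceed int.MaxValue entries should use
// the client's long-returning count methods directly. Host and key names are placeholders.
using System;
using ServiceStack.Redis;

class SortedSetCountSketch
{
    static void Main()
    {
        using var redis = new RedisClient("localhost", 6379);

        IRedisSortedSet ranks = redis.SortedSets["leaderboard"];
        int truncated = ranks.Count;                          // (int)GetSortedSetCount(...) as above
        long exact = redis.GetSortedSetCount("leaderboard");  // raw ZCARD as long

        Console.WriteLine($"{truncated} vs {exact}");
    }
}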
/// - internal class RedisClientSortedSet + internal partial class RedisClientSortedSet : IRedisSortedSet { private readonly RedisClient client; @@ -91,7 +91,7 @@ public int Count { get { - return client.GetSortedSetCount(setId); + return (int)client.GetSortedSetCount(setId); } } @@ -144,15 +144,15 @@ public void RemoveRangeByScore(double fromScore, double toScore) public void StoreFromIntersect(params IRedisSortedSet[] ofSets) { - client.StoreIntersectFromSets(setId, ofSets.GetIds()); + client.StoreIntersectFromSortedSets(setId, ofSets.GetIds()); } public void StoreFromUnion(params IRedisSortedSet[] ofSets) { - client.StoreUnionFromSets(setId, ofSets.GetIds()); + client.StoreUnionFromSortedSets(setId, ofSets.GetIds()); } - public int GetItemIndex(string value) + public long GetItemIndex(string value) { return client.GetItemIndexInSortedSet(setId, value); } diff --git a/src/ServiceStack.Redis/RedisClient_Admin.cs b/src/ServiceStack.Redis/RedisClient_Admin.cs new file mode 100644 index 00000000..804df5f1 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient_Admin.cs @@ -0,0 +1,134 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using System; +using System.Collections.Generic; +using System.Text; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + public partial class RedisClient + : IRedisClient + { + public void SetConfig(string configItem, string value) + { + base.ConfigSet(configItem, value.ToUtf8Bytes()); + } + + public RedisText GetServerRoleInfo() + { + return base.Role(); + } + + public string GetConfig(string configItem) + { + var byteArray = base.ConfigGet(configItem); + return GetConfigParse(byteArray); + } + + static string GetConfigParse(byte[][] byteArray) + { + var sb = StringBuilderCache.Allocate(); + const int startAt = 1; //skip repeating config name + for (var i = startAt; i < byteArray.Length; i++) + { + var bytes = byteArray[i]; + if (sb.Length > 0) + sb.Append(" "); + + sb.Append(bytes.FromUtf8Bytes()); + } + return StringBuilderCache.ReturnAndFree(sb); + } + + public void SaveConfig() + { + base.ConfigRewrite(); + } + + public void ResetInfoStats() + { + base.ConfigResetStat(); + } + + public string GetClient() + { + return base.ClientGetName(); + } + + public void SetClient(string name) + { + base.ClientSetName(name); + } + + public void KillClient(string address) + { + base.ClientKill(address); + } + + public long KillClients(string fromAddress = null, string withId = null, RedisClientType? ofType = null, bool? skipMe = null) + { + var typeString = ofType != null ? ofType.ToString().ToLower() : null; + var skipMeString = skipMe != null ? (skipMe.Value ? 
"yes" : "no") : null; + return base.ClientKill(addr: fromAddress, id: withId, type: typeString, skipMe: skipMeString); + } + + public List> GetClientsInfo() + { + return GetClientsInfoParse(ClientList()); + } + private static List> GetClientsInfoParse(byte[] rawResult) + { + var clientList = rawResult.FromUtf8Bytes(); + var results = new List>(); + + var lines = clientList.Split('\n'); + foreach (var line in lines) + { + if (String.IsNullOrEmpty(line)) continue; + + var map = new Dictionary(); + var parts = line.Split(' '); + foreach (var part in parts) + { + var keyValue = part.SplitOnFirst('='); + map[keyValue[0]] = keyValue[1]; + } + results.Add(map); + } + return results; + } + + public void PauseAllClients(TimeSpan duration) + { + base.ClientPause((int)duration.TotalMilliseconds); + } + + public DateTime GetServerTime() + { + var parts = base.Time(); + return ParseTimeResult(parts); + } + + private static DateTime ParseTimeResult(byte[][] parts) + { + var unixTime = long.Parse(parts[0].FromUtf8Bytes()); + var microSecs = long.Parse(parts[1].FromUtf8Bytes()); + var ticks = microSecs / 1000 * TimeSpan.TicksPerMillisecond; + + var date = unixTime.FromUnixTime(); + var timeSpan = TimeSpan.FromTicks(ticks); + return date + timeSpan; + } + } +} diff --git a/src/ServiceStack.Redis/RedisClient_Hash.Async.cs b/src/ServiceStack.Redis/RedisClient_Hash.Async.cs new file mode 100644 index 00000000..9832b5d4 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient_Hash.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis +{ + public partial class RedisClient + { + internal partial class RedisClientHashes + : IHasNamed + { + IRedisHashAsync IHasNamed.this[string hashId] + { + get => new RedisClientHash(client, hashId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient_Hash.cs b/src/ServiceStack.Redis/RedisClient_Hash.cs index 9d3fb33a..afc71b60 100644 --- a/src/ServiceStack.Redis/RedisClient_Hash.cs +++ b/src/ServiceStack.Redis/RedisClient_Hash.cs @@ -1,143 +1,150 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. 
// using System; using System.Collections.Generic; using System.Linq; -using ServiceStack.DesignPatterns.Model; +using ServiceStack.Model; using ServiceStack.Text; namespace ServiceStack.Redis { - public partial class RedisClient - : IRedisClient - { - public IHasNamed Hashes { get; set; } - - internal class RedisClientHashes - : IHasNamed - { - private readonly RedisClient client; - - public RedisClientHashes(RedisClient client) - { - this.client = client; - } - - public IRedisHash this[string hashId] - { - get - { - return new RedisClientHash(client, hashId); - } - set - { - var hash = this[hashId]; - hash.Clear(); - hash.CopyTo(value.ToArray(), 0); - } - } - } - - public bool SetEntryInHash(string hashId, string key, string value) - { - return base.HSet(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes()) == Success; - } - - public bool SetEntryInHashIfNotExists(string hashId, string key, string value) - { - return base.HSetNX(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes()) == Success; - } - - public void SetRangeInHash(string hashId, IEnumerable> keyValuePairs) - { - var keyValuePairsList = keyValuePairs.ToList(); - if (keyValuePairsList.Count == 0) return; - - var keys = new byte[keyValuePairsList.Count][]; - var values = new byte[keyValuePairsList.Count][]; - - for (var i = 0; i < keyValuePairsList.Count; i++) - { - var kvp = keyValuePairsList[i]; - keys[i] = kvp.Key.ToUtf8Bytes(); - values[i] = kvp.Value.ToUtf8Bytes(); - } - - base.HMSet(hashId, keys, values); - } - - public int IncrementValueInHash(string hashId, string key, int incrementBy) + public partial class RedisClient + : IRedisClient + { + public IHasNamed Hashes { get; set; } + + internal partial class RedisClientHashes + : IHasNamed + { + private readonly RedisClient client; + + public RedisClientHashes(RedisClient client) + { + this.client = client; + } + + public IRedisHash this[string hashId] + { + get + { + return new RedisClientHash(client, hashId); + } + set + { + var hash = this[hashId]; + hash.Clear(); + hash.CopyTo(value.ToArray(), 0); + } + } + } + + public bool SetEntryInHash(string hashId, string key, string value) + { + return base.HSet(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes()) == Success; + } + + public bool SetEntryInHashIfNotExists(string hashId, string key, string value) + { + return base.HSetNX(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes()) == Success; + } + + public void SetRangeInHash(string hashId, IEnumerable> keyValuePairs) + { + if (SetRangeInHashPrepare(keyValuePairs, out var keys, out var values)) + { + base.HMSet(hashId, keys, values); + } + } + bool SetRangeInHashPrepare(IEnumerable> keyValuePairs, out byte[][] keys, out byte[][] values) + { + var keyValuePairsList = keyValuePairs.ToList(); + if (keyValuePairsList.Count == 0) + { + keys = values = default; + return false; + } + + keys = new byte[keyValuePairsList.Count][]; + values = new byte[keyValuePairsList.Count][]; + + for (var i = 0; i < keyValuePairsList.Count; i++) + { + var kvp = keyValuePairsList[i]; + keys[i] = kvp.Key.ToUtf8Bytes(); + values[i] = kvp.Value.ToUtf8Bytes(); + } + return true; + } + + public long IncrementValueInHash(string hashId, string key, int incrementBy) { return base.HIncrby(hashId, key.ToUtf8Bytes(), incrementBy); } - public int IncrementValueInHash(string hashId, string key, long incrementBy) + public long IncrementValueInHash(string hashId, string key, long incrementBy) { return base.HIncrby(hashId, key.ToUtf8Bytes(), incrementBy); } + public double IncrementValueInHash(string hashId, string key, double 
incrementBy) + { + return base.HIncrbyFloat(hashId, key.ToUtf8Bytes(), incrementBy); + } + public string GetValueFromHash(string hashId, string key) - { - return base.HGet(hashId, key.ToUtf8Bytes()).FromUtf8Bytes(); - } - - public bool HashContainsEntry(string hashId, string key) - { - return base.HExists(hashId, key.ToUtf8Bytes()) == Success; - } - - public bool RemoveEntryFromHash(string hashId, string key) - { - return base.HDel(hashId, key.ToUtf8Bytes()) == Success; - } - - public int GetHashCount(string hashId) - { - return base.HLen(hashId); - } - - public List GetHashKeys(string hashId) - { - var multiDataList = base.HKeys(hashId); - return multiDataList.ToStringList(); - } - - public List GetHashValues(string hashId) - { - var multiDataList = base.HVals(hashId); - return multiDataList.ToStringList(); - } - - public Dictionary GetAllEntriesFromHash(string hashId) - { - var multiDataList = base.HGetAll(hashId); - var map = new Dictionary(); - - for (var i = 0; i < multiDataList.Length; i += 2) - { - var key = multiDataList[i].FromUtf8Bytes(); - map[key] = multiDataList[i + 1].FromUtf8Bytes(); - } - - return map; - } - - public List GetValuesFromHash(string hashId, params string[] keys) - { - if (keys.Length == 0) return new List(); - var keyBytes = ConvertToBytes(keys); - var multiDataList = base.HMGet(hashId, keyBytes); - return multiDataList.ToStringList(); - } - } + { + return base.HGet(hashId, key.ToUtf8Bytes()).FromUtf8Bytes(); + } + + public bool HashContainsEntry(string hashId, string key) + { + return base.HExists(hashId, key.ToUtf8Bytes()) == Success; + } + + public bool RemoveEntryFromHash(string hashId, string key) + { + return base.HDel(hashId, key.ToUtf8Bytes()) == Success; + } + + public long GetHashCount(string hashId) + { + return base.HLen(hashId); + } + + public List GetHashKeys(string hashId) + { + var multiDataList = base.HKeys(hashId); + return multiDataList.ToStringList(); + } + + public List GetHashValues(string hashId) + { + var multiDataList = base.HVals(hashId); + return multiDataList.ToStringList(); + } + + public Dictionary GetAllEntriesFromHash(string hashId) + { + var multiDataList = base.HGetAll(hashId); + return multiDataList.ToStringDictionary(); + } + + public List GetValuesFromHash(string hashId, params string[] keys) + { + if (keys.Length == 0) return new List(); + var keyBytes = ConvertToBytes(keys); + var multiDataList = base.HMGet(hashId, keyBytes); + return multiDataList.ToStringList(); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient_List.Async.cs b/src/ServiceStack.Redis/RedisClient_List.Async.cs new file mode 100644 index 00000000..5b04ac05 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient_List.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
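// Illustrative sketch, not part of the patch: exercising the hash API above, including the widened
// long/double increment overloads (HINCRBY / HINCRBYFLOAT). Host and key names are placeholders.
using System;
using System.Collections.Generic;
using ServiceStack.Redis;

class RedisHashSketch
{
    static void Main()
    {
        using var redis = new RedisClient("localhost", 6379);

        redis.SetRangeInHash("user:1", new Dictionary<string, string>
        {
            ["name"] = "demo",
            ["visits"] = "0",
        });

        long visits = redis.IncrementValueInHash("user:1", "visits", 1);   // HINCRBY, returns long
        double score = redis.IncrementValueInHash("user:1", "score", 0.5); // HINCRBYFLOAT, returns double

        var all = redis.GetAllEntriesFromHash("user:1"); // HGETALL as Dictionary<string, string>
        Console.WriteLine($"{all["name"]}: visits={visits}, score={score}");
    }
}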
+// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis +{ + public partial class RedisClient + { + internal partial class RedisClientLists + : IHasNamed + { + IRedisListAsync IHasNamed.this[string listId] + { + get => new RedisClientList(client, listId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient_List.cs b/src/ServiceStack.Redis/RedisClient_List.cs index 5b5452fb..d723b975 100644 --- a/src/ServiceStack.Redis/RedisClient_List.cs +++ b/src/ServiceStack.Redis/RedisClient_List.cs @@ -1,137 +1,150 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; using System.Linq; -using ServiceStack.DesignPatterns.Model; +using ServiceStack.Model; +using ServiceStack.Redis.Pipeline; using ServiceStack.Text; namespace ServiceStack.Redis { - public partial class RedisClient - : IRedisClient - { - const int FirstElement = 0; - const int LastElement = -1; - - public IHasNamed Lists { get; set; } - - internal class RedisClientLists - : IHasNamed - { - private readonly RedisClient client; - - public RedisClientLists(RedisClient client) - { - this.client = client; - } - - public IRedisList this[string listId] - { - get - { - return new RedisClientList(client, listId); - } - set - { - var list = this[listId]; - list.Clear(); - list.CopyTo(value.ToArray(), 0); - } - } - } - - public List GetAllItemsFromList(string listId) - { - var multiDataList = LRange(listId, FirstElement, LastElement); - return multiDataList.ToStringList(); - } - - public List GetRangeFromList(string listId, int startingFrom, int endingAt) - { - var multiDataList = LRange(listId, startingFrom, endingAt); - return multiDataList.ToStringList(); - } - - public List GetRangeFromSortedList(string listId, int startingFrom, int endingAt) - { - var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, SortAlpha = true }; - return GetSortedItemsFromList(listId, sortOptions); - } - - public List GetSortedItemsFromList(string listId, SortOptions sortOptions) - { - var multiDataList = Sort(listId, sortOptions); - return multiDataList.ToStringList(); - } - - public void AddItemToList(string listId, string value) - { - RPush(listId, value.ToUtf8Bytes()); - } - - public void AddRangeToList(string listId, List values) - { - var uListId = listId.ToUtf8Bytes(); - - var pipeline = CreatePipelineCommand(); - foreach (var value in values) - { - pipeline.WriteCommand(Commands.RPush, uListId, value.ToUtf8Bytes()); - } - pipeline.Flush(); - - //the number of items after - var intResults = pipeline.ReadAllAsInts(); - } - - public void PrependItemToList(string listId, string value) - { - LPush(listId, value.ToUtf8Bytes()); - } - - public void PrependRangeToList(string listId, List values) - { - var uListId = listId.ToUtf8Bytes(); - - var pipeline = CreatePipelineCommand(); - //ensure list[0] == value[0] after batch operation - for (var i = values.Count - 1; i >= 0; i--) - { - var value = values[i]; - pipeline.WriteCommand(Commands.LPush, uListId, 
value.ToUtf8Bytes()); - } - pipeline.Flush(); - - //the number of items after - var intResults = pipeline.ReadAllAsInts(); - } - - public void RemoveAllFromList(string listId) - { - LTrim(listId, LastElement, FirstElement); - } - - public string RemoveStartFromList(string listId) - { - return base.LPop(listId).FromUtf8Bytes(); - } - - public string BlockingRemoveStartFromList(string listId, TimeSpan? timeOut) - { + public partial class RedisClient + : IRedisClient + { + const int FirstElement = 0; + const int LastElement = -1; + + public IHasNamed Lists { get; set; } + + internal partial class RedisClientLists + : IHasNamed + { + private readonly RedisClient client; + + public RedisClientLists(RedisClient client) + { + this.client = client; + } + + public IRedisList this[string listId] + { + get + { + return new RedisClientList(client, listId); + } + set + { + var list = this[listId]; + list.Clear(); + list.CopyTo(value.ToArray(), 0); + } + } + } + + public List GetAllItemsFromList(string listId) + { + var multiDataList = LRange(listId, FirstElement, LastElement); + return multiDataList.ToStringList(); + } + + public List GetRangeFromList(string listId, int startingFrom, int endingAt) + { + var multiDataList = LRange(listId, startingFrom, endingAt); + return multiDataList.ToStringList(); + } + + public List GetRangeFromSortedList(string listId, int startingFrom, int endingAt) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, SortAlpha = true }; + return GetSortedItemsFromList(listId, sortOptions); + } + + public List GetSortedItemsFromList(string listId, SortOptions sortOptions) + { + var multiDataList = Sort(listId, sortOptions); + return multiDataList.ToStringList(); + } + + public void AddItemToList(string listId, string value) + { + RPush(listId, value.ToUtf8Bytes()); + } + + public void AddRangeToList(string listId, List values) + { + var pipeline = AddRangeToListPrepareNonFlushed(listId, values); + pipeline.Flush(); + + //the number of items after + var intResults = pipeline.ReadAllAsInts(); + } + + private RedisPipelineCommand AddRangeToListPrepareNonFlushed(string listId, List values) + { + var uListId = listId.ToUtf8Bytes(); + + var pipeline = CreatePipelineCommand(); + foreach (var value in values) + { + pipeline.WriteCommand(Commands.RPush, uListId, value.ToUtf8Bytes()); + } + return pipeline; + } + + public void PrependItemToList(string listId, string value) + { + LPush(listId, value.ToUtf8Bytes()); + } + + public void PrependRangeToList(string listId, List values) + { + var pipeline = PrependRangeToListPrepareNonFlushed(listId, values); + pipeline.Flush(); + + //the number of items after + var intResults = pipeline.ReadAllAsInts(); + } + + private RedisPipelineCommand PrependRangeToListPrepareNonFlushed(string listId, List values) + { + var uListId = listId.ToUtf8Bytes(); + + var pipeline = CreatePipelineCommand(); + //ensure list[0] == value[0] after batch operation + for (var i = values.Count - 1; i >= 0; i--) + { + var value = values[i]; + pipeline.WriteCommand(Commands.LPush, uListId, value.ToUtf8Bytes()); + } + return pipeline; + } + + public void RemoveAllFromList(string listId) + { + LTrim(listId, LastElement, FirstElement); + } + + public string RemoveStartFromList(string listId) + { + return base.LPop(listId).FromUtf8Bytes(); + } + + public string BlockingRemoveStartFromList(string listId, TimeSpan? 
timeOut) + { return BLPopValue(listId, (int)timeOut.GetValueOrDefault().TotalSeconds).FromUtf8Bytes(); - } + } public ItemRef BlockingRemoveStartFromLists(string[] listIds, TimeSpan? timeOut) { @@ -141,95 +154,95 @@ public ItemRef BlockingRemoveStartFromLists(string[] listIds, TimeSpan? timeOut) return new ItemRef { Id = value[0].FromUtf8Bytes(), Item = value[1].FromUtf8Bytes() }; } - public string RemoveEndFromList(string listId) - { - return base.RPop(listId).FromUtf8Bytes(); - } - - public void TrimList(string listId, int keepStartingFrom, int keepEndingAt) - { - LTrim(listId, keepStartingFrom, keepEndingAt); - } - - public int RemoveItemFromList(string listId, string value) - { - return LRem(listId, 0, value.ToUtf8Bytes()); - } - - public int RemoveItemFromList(string listId, string value, int noOfMatches) - { - return LRem(listId, noOfMatches, value.ToUtf8Bytes()); - } - - public int GetListCount(string listId) - { - return LLen(listId); - } - - public string GetItemFromList(string listId, int listIndex) - { - return LIndex(listId, listIndex).FromUtf8Bytes(); - } - - public void SetItemInList(string listId, int listIndex, string value) - { - LSet(listId, listIndex, value.ToUtf8Bytes()); - } - - public void EnqueueItemOnList(string listId, string value) - { - LPush(listId, value.ToUtf8Bytes()); - } - - public string DequeueItemFromList(string listId) - { - return RPop(listId).FromUtf8Bytes(); - } - - public string BlockingDequeueItemFromList(string listId, TimeSpan? timeOut) - { - return BRPopValue(listId, (int)timeOut.GetValueOrDefault().TotalSeconds).FromUtf8Bytes(); - } + public string RemoveEndFromList(string listId) + { + return base.RPop(listId).FromUtf8Bytes(); + } + + public void TrimList(string listId, int keepStartingFrom, int keepEndingAt) + { + LTrim(listId, keepStartingFrom, keepEndingAt); + } + + public long RemoveItemFromList(string listId, string value) + { + return LRem(listId, 0, value.ToUtf8Bytes()); + } + + public long RemoveItemFromList(string listId, string value, int noOfMatches) + { + return LRem(listId, noOfMatches, value.ToUtf8Bytes()); + } + + public long GetListCount(string listId) + { + return LLen(listId); + } + + public string GetItemFromList(string listId, int listIndex) + { + return LIndex(listId, listIndex).FromUtf8Bytes(); + } + + public void SetItemInList(string listId, int listIndex, string value) + { + LSet(listId, listIndex, value.ToUtf8Bytes()); + } + + public void EnqueueItemOnList(string listId, string value) + { + LPush(listId, value.ToUtf8Bytes()); + } + + public string DequeueItemFromList(string listId) + { + return RPop(listId).FromUtf8Bytes(); + } + + public string BlockingDequeueItemFromList(string listId, TimeSpan? timeOut) + { + return BRPopValue(listId, (int)timeOut.GetValueOrDefault().TotalSeconds).FromUtf8Bytes(); + } public ItemRef BlockingDequeueItemFromLists(string[] listIds, TimeSpan? 
timeOut) { - var value = BRPopValue(listIds, (int) timeOut.GetValueOrDefault().TotalSeconds); - if( value == null ) + var value = BRPopValue(listIds, (int)timeOut.GetValueOrDefault().TotalSeconds); + if (value == null) return null; - return new ItemRef{Id=value[0].FromUtf8Bytes(), Item= value[1].FromUtf8Bytes()}; - } + return new ItemRef { Id = value[0].FromUtf8Bytes(), Item = value[1].FromUtf8Bytes() }; + } - public void PushItemToList(string listId, string value) - { - RPush(listId, value.ToUtf8Bytes()); - } + public void PushItemToList(string listId, string value) + { + RPush(listId, value.ToUtf8Bytes()); + } - public string PopItemFromList(string listId) - { - return RPop(listId).FromUtf8Bytes(); - } + public string PopItemFromList(string listId) + { + return RPop(listId).FromUtf8Bytes(); + } - public string BlockingPopItemFromList(string listId, TimeSpan? timeOut) - { - return BRPopValue(listId, (int)timeOut.GetValueOrDefault().TotalSeconds).FromUtf8Bytes(); - } + public string BlockingPopItemFromList(string listId, TimeSpan? timeOut) + { + return BRPopValue(listId, (int)timeOut.GetValueOrDefault().TotalSeconds).FromUtf8Bytes(); + } public ItemRef BlockingPopItemFromLists(string[] listIds, TimeSpan? timeOut) { - var value = BRPopValue(listIds, (int) timeOut.GetValueOrDefault().TotalSeconds); - if( value == null ) + var value = BRPopValue(listIds, (int)timeOut.GetValueOrDefault().TotalSeconds); + if (value == null) return null; - return new ItemRef{Id=value[0].FromUtf8Bytes(), Item= value[1].FromUtf8Bytes()}; - } + return new ItemRef { Id = value[0].FromUtf8Bytes(), Item = value[1].FromUtf8Bytes() }; + } - public string PopAndPushItemBetweenLists(string fromListId, string toListId) - { - return RPopLPush(fromListId, toListId).FromUtf8Bytes(); - } + public string PopAndPushItemBetweenLists(string fromListId, string toListId) + { + return RPopLPush(fromListId, toListId).FromUtf8Bytes(); + } - public string BlockingPopAndPushItemBetweenLists(string fromListId, string toListId, TimeSpan? timeOut) - { + public string BlockingPopAndPushItemBetweenLists(string fromListId, string toListId, TimeSpan? timeOut) + { return BRPopLPush(fromListId, toListId, (int)timeOut.GetValueOrDefault().TotalSeconds).FromUtf8Bytes(); - } - } + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient_Set.Async.cs b/src/ServiceStack.Redis/RedisClient_Set.Async.cs new file mode 100644 index 00000000..3e6f3b21 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient_Set.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
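// Illustrative sketch, not part of the patch: why PrependRangeToList above issues its LPUSH
// commands in reverse order. LPUSH inserts at the head, so pushing the batch front-to-back would
// reverse it; iterating backwards keeps list[0] == values[0]. Pure in-memory simulation, no Redis
// connection required.
using System;
using System.Collections.Generic;

class LPushOrderSketch
{
    static void Main()
    {
        var values = new List<string> { "a", "b", "c" };
        var list = new LinkedList<string>();

        // mirrors: for (var i = values.Count - 1; i >= 0; i--) LPUSH listId values[i]
        for (var i = values.Count - 1; i >= 0; i--)
            list.AddFirst(values[i]); // LPUSH puts the value at the head

        Console.WriteLine(string.Join(",", list)); // a,b,c -> same order as the source batch
    }
}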
+// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis +{ + public partial class RedisClient + { + internal partial class RedisClientSets + : IHasNamed + { + IRedisSetAsync IHasNamed.this[string setId] + { + get => new RedisClientSet(client, setId); + set => throw new NotSupportedException(); + } + } + } +} diff --git a/src/ServiceStack.Redis/RedisClient_Set.cs b/src/ServiceStack.Redis/RedisClient_Set.cs index a8441fe6..c99a00c4 100644 --- a/src/ServiceStack.Redis/RedisClient_Set.cs +++ b/src/ServiceStack.Redis/RedisClient_Set.cs @@ -1,200 +1,280 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; using System.Linq; -using ServiceStack.Common.Extensions; -using ServiceStack.DesignPatterns.Model; +using System.Threading; +using ServiceStack.Common; +using ServiceStack.Model; using ServiceStack.Redis.Generic; using ServiceStack.Redis.Pipeline; using ServiceStack.Text; namespace ServiceStack.Redis { - public partial class RedisClient - : IRedisClient - { - public IHasNamed Sets { get; set; } - - internal class RedisClientSets - : IHasNamed - { - private readonly RedisClient client; - - public RedisClientSets(RedisClient client) - { - this.client = client; - } - - public IRedisSet this[string setId] - { - get - { - return new RedisClientSet(client, setId); - } - set - { - var col = this[setId]; - col.Clear(); - col.CopyTo(value.ToArray(), 0); - } - } - } - - private static HashSet CreateHashSet(byte[][] multiDataList) - { - var results = new HashSet(); - foreach (var multiData in multiDataList) - { - results.Add(multiData.FromUtf8Bytes()); - } - return results; - } - - public List GetSortedEntryValues(string setId, int startingFrom, int endingAt) - { - var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; - var multiDataList = Sort(setId, sortOptions); - return multiDataList.ToStringList(); - } - - public HashSet GetAllItemsFromSet(string setId) - { - var multiDataList = SMembers(setId); - return CreateHashSet(multiDataList); - } - - public void AddItemToSet(string setId, string item) - { - SAdd(setId, item.ToUtf8Bytes()); - } - - public void AddRangeToSet(string setId, List items) - { - if (setId.IsNullOrEmpty()) - throw new ArgumentNullException("setId"); - if (items == null) - throw new ArgumentNullException("items"); - if (items.Count == 0) - return; - - if (this.Transaction != null) - { - var trans = this.Transaction as IRedisQueueableOperation; - if (trans == null) - throw new NotSupportedException("Cannot AddRangeToSet() when Transaction is: " + this.Transaction.GetType().Name); - - //Complete the first QueuedCommand() - AddItemToSet(setId, items[0]); - - //Add subsequent queued commands - for (var i = 1; i < items.Count; i++) - { - var item = items[i]; - trans.QueueCommand(c => c.AddItemToSet(setId, item)); - } - } - else - { - var uSetId = setId.ToUtf8Bytes(); - var pipeline = CreatePipelineCommand(); - foreach (var item in items) - { - pipeline.WriteCommand(Commands.SAdd, uSetId, item.ToUtf8Bytes()); - } - pipeline.Flush(); - - //the number of 
items after - var intResults = pipeline.ReadAllAsInts(); - } - } - - public void RemoveItemFromSet(string setId, string item) - { - SRem(setId, item.ToUtf8Bytes()); - } - - public string PopItemFromSet(string setId) - { - return SPop(setId).FromUtf8Bytes(); - } - - public void MoveBetweenSets(string fromSetId, string toSetId, string item) - { - SMove(fromSetId, toSetId, item.ToUtf8Bytes()); - } - - public int GetSetCount(string setId) - { - return SCard(setId); - } - - public bool SetContainsItem(string setId, string item) - { - return SIsMember(setId, item.ToUtf8Bytes()) == 1; - } - - public HashSet GetIntersectFromSets(params string[] setIds) - { - if (setIds.Length == 0) - return new HashSet(); - - var multiDataList = SInter(setIds); - return CreateHashSet(multiDataList); - } - - public void StoreIntersectFromSets(string intoSetId, params string[] setIds) - { - if (setIds.Length == 0) return; - - SInterStore(intoSetId, setIds); - } - - public HashSet GetUnionFromSets(params string[] setIds) - { - if (setIds.Length == 0) - return new HashSet(); - - var multiDataList = SUnion(setIds); - return CreateHashSet(multiDataList); - } - - public void StoreUnionFromSets(string intoSetId, params string[] setIds) - { - if (setIds.Length == 0) return; - - SUnionStore(intoSetId, setIds); - } - - public HashSet GetDifferencesFromSet(string fromSetId, params string[] withSetIds) - { - if (withSetIds.Length == 0) - return new HashSet(); - - var multiDataList = SDiff(fromSetId, withSetIds); - return CreateHashSet(multiDataList); - } - - public void StoreDifferencesFromSet(string intoSetId, string fromSetId, params string[] withSetIds) - { - if (withSetIds.Length == 0) return; - - SDiffStore(intoSetId, fromSetId, withSetIds); - } - - public string GetRandomItemFromSet(string setId) - { - return SRandMember(setId).FromUtf8Bytes(); - } - } -} \ No newline at end of file + public partial class RedisClient + : IRedisClient + { + public IHasNamed Sets { get; set; } + + internal partial class RedisClientSets + : IHasNamed + { + private readonly RedisClient client; + + public RedisClientSets(RedisClient client) + { + this.client = client; + } + + public IRedisSet this[string setId] + { + get + { + return new RedisClientSet(client, setId); + } + set + { + var col = this[setId]; + col.Clear(); + col.CopyTo(value.ToArray(), 0); + } + } + } + + private static HashSet CreateHashSet(byte[][] multiDataList) + { + var results = new HashSet(); + foreach (var multiData in multiDataList) + { + results.Add(multiData.FromUtf8Bytes()); + } + return results; + } + + public List GetSortedEntryValues(string setId, int startingFrom, int endingAt) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; + var multiDataList = Sort(setId, sortOptions); + return multiDataList.ToStringList(); + } + + public long AddGeoMember(string key, double longitude, double latitude, string member) + { + return base.GeoAdd(key, longitude, latitude, member); + } + + public long AddGeoMembers(string key, params RedisGeo[] geoPoints) + { + return base.GeoAdd(key, geoPoints); + } + + public double CalculateDistanceBetweenGeoMembers(string key, string fromMember, string toMember, string unit = null) + { + return base.GeoDist(key, fromMember, toMember, unit); + } + + public string[] GetGeohashes(string key, params string[] members) + { + return base.GeoHash(key, members); + } + + public List GetGeoCoordinates(string key, params string[] members) + { + return base.GeoPos(key, members); + } + + public string[] 
FindGeoMembersInRadius(string key, double longitude, double latitude, double radius, string unit) + { + var results = base.GeoRadius(key, longitude, latitude, radius, unit); + return ParseFindGeoMembersResult(results); + } + + private static string[] ParseFindGeoMembersResult(List results) + { + var to = new string[results.Count]; + for (var i = 0; i < results.Count; i++) + { + to[i] = results[i].Member; + } + return to; + } + + public List FindGeoResultsInRadius(string key, double longitude, double latitude, double radius, string unit, + int? count = null, bool? sortByNearest = null) + { + return base.GeoRadius(key, longitude, latitude, radius, unit, withCoords:true, withDist:true, withHash:true, count:count, asc: sortByNearest); + } + + public string[] FindGeoMembersInRadius(string key, string member, double radius, string unit) + { + var results = base.GeoRadiusByMember(key, member, radius, unit); + return ParseFindGeoMembersResult(results); + } + + public List FindGeoResultsInRadius(string key, string member, double radius, string unit, int? count = null, bool? sortByNearest = null) + { + return base.GeoRadiusByMember(key, member, radius, unit, withCoords: true, withDist: true, withHash: true, count: count, asc: sortByNearest); + } + + public HashSet GetAllItemsFromSet(string setId) + { + var multiDataList = SMembers(setId); + return CreateHashSet(multiDataList); + } + + public void AddItemToSet(string setId, string item) + { + SAdd(setId, item.ToUtf8Bytes()); + } + + public void AddRangeToSet(string setId, List items) + { + if (AddRangeToSetNeedsSend(setId, items)) + { + var uSetId = setId.ToUtf8Bytes(); + var pipeline = CreatePipelineCommand(); + foreach (var item in items) + { + pipeline.WriteCommand(Commands.SAdd, uSetId, item.ToUtf8Bytes()); + } + pipeline.Flush(); + + //the number of items after + _ = pipeline.ReadAllAsInts(); + } + } + + bool AddRangeToSetNeedsSend(string setId, List items) + { + if (setId.IsNullOrEmpty()) + throw new ArgumentNullException("setId"); + if (items == null) + throw new ArgumentNullException("items"); + if (items.Count == 0) + return false; + + if (this.Transaction != null || this.Pipeline != null) + { + var queueable = this.Transaction as IRedisQueueableOperation + ?? 
this.Pipeline as IRedisQueueableOperation; + + if (queueable == null) + throw new NotSupportedException("Cannot AddRangeToSet() when Transaction is: " + this.Transaction.GetType().Name); + + //Complete the first QueuedCommand() + AddItemToSet(setId, items[0]); + + //Add subsequent queued commands + for (var i = 1; i < items.Count; i++) + { + var item = items[i]; + queueable.QueueCommand(c => c.AddItemToSet(setId, item)); + } + return false; + } + else + { + return true; + } + } + + public void RemoveItemFromSet(string setId, string item) + { + SRem(setId, item.ToUtf8Bytes()); + } + + public string PopItemFromSet(string setId) + { + return SPop(setId).FromUtf8Bytes(); + } + + public List PopItemsFromSet(string setId, int count) + { + return SPop(setId, count).ToStringList(); + } + + public void MoveBetweenSets(string fromSetId, string toSetId, string item) + { + SMove(fromSetId, toSetId, item.ToUtf8Bytes()); + } + + public long GetSetCount(string setId) + { + return SCard(setId); + } + + public bool SetContainsItem(string setId, string item) + { + return SIsMember(setId, item.ToUtf8Bytes()) == 1; + } + + public HashSet GetIntersectFromSets(params string[] setIds) + { + if (setIds.Length == 0) + return new HashSet(); + + var multiDataList = SInter(setIds); + return CreateHashSet(multiDataList); + } + + public void StoreIntersectFromSets(string intoSetId, params string[] setIds) + { + if (setIds.Length == 0) return; + + SInterStore(intoSetId, setIds); + } + + public HashSet GetUnionFromSets(params string[] setIds) + { + if (setIds.Length == 0) + return new HashSet(); + + var multiDataList = SUnion(setIds); + return CreateHashSet(multiDataList); + } + + public void StoreUnionFromSets(string intoSetId, params string[] setIds) + { + if (setIds.Length == 0) return; + + SUnionStore(intoSetId, setIds); + } + + public HashSet GetDifferencesFromSet(string fromSetId, params string[] withSetIds) + { + if (withSetIds.Length == 0) + return new HashSet(); + + var multiDataList = SDiff(fromSetId, withSetIds); + return CreateHashSet(multiDataList); + } + + public void StoreDifferencesFromSet(string intoSetId, string fromSetId, params string[] withSetIds) + { + if (withSetIds.Length == 0) return; + + SDiffStore(intoSetId, fromSetId, withSetIds); + } + + public string GetRandomItemFromSet(string setId) + { + return SRandMember(setId).FromUtf8Bytes(); + } + + public IEnumerable GetKeysByPattern(string pattern) + { + return ScanAllKeys(pattern); + } + } +} diff --git a/src/ServiceStack.Redis/RedisClient_Slowlog.cs b/src/ServiceStack.Redis/RedisClient_Slowlog.cs index 288b730b..ecde5244 100644 --- a/src/ServiceStack.Redis/RedisClient_Slowlog.cs +++ b/src/ServiceStack.Redis/RedisClient_Slowlog.cs @@ -1,47 +1,48 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. 
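// Illustrative sketch, not part of the patch: the GEO helpers added above. Coordinates, key and
// member names are made up; "km" is one of the unit strings Redis accepts for GEODIST/GEORADIUS.
using System;
using ServiceStack.Redis;

class RedisGeoSketch
{
    static void Main()
    {
        using var redis = new RedisClient("localhost", 6379);

        redis.AddGeoMember("offices", longitude: -122.41942, latitude: 37.77493, member: "sf");
        redis.AddGeoMember("offices", longitude: -122.27652, latitude: 37.80574, member: "oakland");

        double km = redis.CalculateDistanceBetweenGeoMembers("offices", "sf", "oakland", "km");
        string[] nearby = redis.FindGeoMembersInRadius("offices", -122.4, 37.78, 20, "km");

        Console.WriteLine($"distance={km:F1} km, nearby={string.Join(",", nearby)}");
    }
}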
// using System; using System.Collections.Generic; using System.Globalization; using System.Linq; -using ServiceStack.Common.Extensions; -using ServiceStack.DesignPatterns.Model; -using ServiceStack.Redis.Generic; -using ServiceStack.Redis.Pipeline; using ServiceStack.Text; namespace ServiceStack.Redis { - public partial class RedisClient - : IRedisClient - { + public partial class RedisClient + : IRedisClient + { public IEnumerable GetSlowlog(int? numberOfRecords = null) { var data = Slowlog(numberOfRecords); + return ParseSlowlog(data); + } + + private static SlowlogItem[] ParseSlowlog(object[] data) + { var list = new SlowlogItem[data.Length]; - for(int i = 0; i < data.Length; i++) + for (int i = 0; i < data.Length; i++) { var log = (object[])data[i]; - var arguments = ((object[]) log[3]).OfType() + var arguments = ((object[])log[3]).OfType() .Select(t => t.FromUtf8Bytes()) .ToArray(); list[i] = new SlowlogItem( - int.Parse((string) log[0], CultureInfo.InvariantCulture), - DateTimeExtensions.FromUnixTime(int.Parse((string) log[1], CultureInfo.InvariantCulture)), - int.Parse((string) log[2], CultureInfo.InvariantCulture), + Int32.Parse((string)log[0], CultureInfo.InvariantCulture), + DateTimeExtensions.FromUnixTime(Int32.Parse((string)log[1], CultureInfo.InvariantCulture)), + Int32.Parse((string)log[2], CultureInfo.InvariantCulture), arguments ); } @@ -49,22 +50,6 @@ public IEnumerable GetSlowlog(int? numberOfRecords = null) return list; } - - } - - public class SlowlogItem - { - public SlowlogItem(int id, DateTime timeStamp, int duration, string [] arguments) - { - Id = id; - Timestamp = timeStamp; - Duration = duration; - Arguments = arguments; - } - public int Id { get; private set; } - public int Duration { get; private set; } - public DateTime Timestamp { get; private set; } - public string[] Arguments { get; private set; } } } diff --git a/src/ServiceStack.Redis/RedisClient_SortedSet.Async.cs b/src/ServiceStack.Redis/RedisClient_SortedSet.Async.cs new file mode 100644 index 00000000..b3a89ff3 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient_SortedSet.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis +{ + public partial class RedisClient : IRedisClient + { + internal partial class RedisClientSortedSets + : IHasNamed + { + IRedisSortedSetAsync IHasNamed.this[string setId] + { + get => new RedisClientSortedSet(client, setId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient_SortedSet.cs b/src/ServiceStack.Redis/RedisClient_SortedSet.cs index f11ab805..55549114 100644 --- a/src/ServiceStack.Redis/RedisClient_SortedSet.cs +++ b/src/ServiceStack.Redis/RedisClient_SortedSet.cs @@ -1,421 +1,467 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. 
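// Illustrative sketch, not part of the patch: reading the slow log through GetSlowlog above. It
// assumes the SlowlogItem shape used by ParseSlowlog (Id, Timestamp, Duration, Arguments), with
// Duration reported by Redis in microseconds; the record count and host are placeholders.
using System;
using ServiceStack.Redis;

class SlowlogSketch
{
    static void Main()
    {
        using var redis = new RedisClient("localhost", 6379);

        foreach (var entry in redis.GetSlowlog(10)) // SLOWLOG GET 10
        {
            Console.WriteLine(
                $"#{entry.Id} {entry.Timestamp:u} {entry.Duration}us {string.Join(" ", entry.Arguments)}");
        }
    }
}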
// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; using System.Globalization; using System.Linq; -using ServiceStack.DesignPatterns.Model; +using ServiceStack.Model; +using ServiceStack.Redis.Pipeline; using ServiceStack.Redis.Support; using ServiceStack.Text; namespace ServiceStack.Redis { - public partial class RedisClient : IRedisClient - { - public IHasNamed SortedSets { get; set; } - - internal class RedisClientSortedSets - : IHasNamed - { - private readonly RedisClient client; - - public RedisClientSortedSets(RedisClient client) - { - this.client = client; - } - - public IRedisSortedSet this[string setId] - { - get - { - return new RedisClientSortedSet(client, setId); - } - set - { - var col = this[setId]; - col.Clear(); - col.CopyTo(value.ToArray(), 0); - } - } - } - - public static double GetLexicalScore(string value) - { - if (string.IsNullOrEmpty(value)) - return 0; - - var lexicalValue = 0; - if (value.Length >= 1) - lexicalValue += value[0] * (int)Math.Pow(256, 3); - - if (value.Length >= 2) - lexicalValue += value[1] * (int)Math.Pow(256, 2); - - if (value.Length >= 3) - lexicalValue += value[2] * (int)Math.Pow(256, 1); - - if (value.Length >= 4) - lexicalValue += value[3]; - - return lexicalValue; - } - - public bool AddItemToSortedSet(string setId, string value) - { - return AddItemToSortedSet(setId, value, GetLexicalScore(value)); - } - - public bool AddItemToSortedSet(string setId, string value, double score) - { - return base.ZAdd(setId, score, value.ToUtf8Bytes()) == Success; - } - - public bool AddItemToSortedSet(string setId, string value, long score) - { - return base.ZAdd(setId, score, value.ToUtf8Bytes()) == Success; - } - - public bool AddRangeToSortedSet(string setId, List values, double score) - { - var pipeline = CreatePipelineCommand(); - var uSetId = setId.ToUtf8Bytes(); - var uScore = score.ToFastUtf8Bytes(); - - foreach (var value in values) - { - pipeline.WriteCommand(Commands.ZAdd, uSetId, uScore, value.ToUtf8Bytes()); - } - - pipeline.Flush(); - - var success = pipeline.ReadAllAsIntsHaveSuccess(); - return success; - } - - public bool AddRangeToSortedSet(string setId, List values, long score) - { - var pipeline = CreatePipelineCommand(); - var uSetId = setId.ToUtf8Bytes(); - var uScore = score.ToUtf8Bytes(); - - foreach (var value in values) - { - pipeline.WriteCommand(Commands.ZAdd, uSetId, uScore, value.ToUtf8Bytes()); - } - - pipeline.Flush(); - - var success = pipeline.ReadAllAsIntsHaveSuccess(); - return success; - } - - public bool RemoveItemFromSortedSet(string setId, string value) - { - return base.ZRem(setId, value.ToUtf8Bytes()) == Success; - } - - public string PopItemWithLowestScoreFromSortedSet(string setId) - { - //TODO: this should be atomic - var topScoreItemBytes = base.ZRange(setId, FirstElement, 1); - if (topScoreItemBytes.Length == 0) return null; - - base.ZRem(setId, topScoreItemBytes[0]); - return topScoreItemBytes[0].FromUtf8Bytes(); - } - - public string PopItemWithHighestScoreFromSortedSet(string setId) - { - //TODO: this should be atomic - var topScoreItemBytes = base.ZRevRange(setId, FirstElement, 1); - if (topScoreItemBytes.Length == 0) return null; - - base.ZRem(setId, topScoreItemBytes[0]); - return topScoreItemBytes[0].FromUtf8Bytes(); - } - - public bool SortedSetContainsItem(string setId, string value) - { - return base.ZRank(setId, value.ToUtf8Bytes()) != -1; - } - - public double 
IncrementItemInSortedSet(string setId, string value, double incrementBy) - { - return base.ZIncrBy(setId, incrementBy, value.ToUtf8Bytes()); - } - - public double IncrementItemInSortedSet(string setId, string value, long incrementBy) - { - return base.ZIncrBy(setId, incrementBy, value.ToUtf8Bytes()); - } - - public int GetItemIndexInSortedSet(string setId, string value) - { - return base.ZRank(setId, value.ToUtf8Bytes()); - } - - public int GetItemIndexInSortedSetDesc(string setId, string value) - { - return base.ZRevRank(setId, value.ToUtf8Bytes()); - } - - public List GetAllItemsFromSortedSet(string setId) - { - var multiDataList = base.ZRange(setId, FirstElement, LastElement); - return multiDataList.ToStringList(); - } - - public List GetAllItemsFromSortedSetDesc(string setId) - { - var multiDataList = base.ZRevRange(setId, FirstElement, LastElement); - return multiDataList.ToStringList(); - } - - public List GetRangeFromSortedSet(string setId, int fromRank, int toRank) - { - var multiDataList = base.ZRange(setId, fromRank, toRank); - return multiDataList.ToStringList(); - } - - public List GetRangeFromSortedSetDesc(string setId, int fromRank, int toRank) - { - var multiDataList = base.ZRevRange(setId, fromRank, toRank); - return multiDataList.ToStringList(); - } - - public IDictionary GetAllWithScoresFromSortedSet(string setId) - { - var multiDataList = base.ZRangeWithScores(setId, FirstElement, LastElement); - return CreateSortedScoreMap(multiDataList); - } - - public IDictionary GetRangeWithScoresFromSortedSet(string setId, int fromRank, int toRank) - { - var multiDataList = base.ZRangeWithScores(setId, fromRank, toRank); - return CreateSortedScoreMap(multiDataList); - } - - public IDictionary GetRangeWithScoresFromSortedSetDesc(string setId, int fromRank, int toRank) - { - var multiDataList = base.ZRevRangeWithScores(setId, fromRank, toRank); - return CreateSortedScoreMap(multiDataList); - } - - private static IDictionary CreateSortedScoreMap(byte[][] multiDataList) - { - var map = new OrderedDictionary(); - - for (var i = 0; i < multiDataList.Length; i += 2) - { - var key = multiDataList[i].FromUtf8Bytes(); - double value; - double.TryParse(multiDataList[i + 1].FromUtf8Bytes(), NumberStyles.Any, CultureInfo.InvariantCulture, out value); - map[key] = value; - } - - return map; - } - - - public List GetRangeFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore) - { - return GetRangeFromSortedSetByLowestScore(setId, fromStringScore, toStringScore, null, null); - } - - public List GetRangeFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take) - { - var fromScore = GetLexicalScore(fromStringScore); - var toScore = GetLexicalScore(toStringScore); - return GetRangeFromSortedSetByLowestScore(setId, fromScore, toScore, skip, take); - } - - public List GetRangeFromSortedSetByLowestScore(string setId, double fromScore, double toScore) - { - return GetRangeFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); - } - - public List GetRangeFromSortedSetByLowestScore(string setId, long fromScore, long toScore) - { - return GetRangeFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); - } - - public List GetRangeFromSortedSetByLowestScore(string setId, double fromScore, double toScore, int? skip, int? 
take) - { - var multiDataList = base.ZRangeByScore(setId, fromScore, toScore, skip, take); - return multiDataList.ToStringList(); - } - - public List GetRangeFromSortedSetByLowestScore(string setId, long fromScore, long toScore, int? skip, int? take) - { - var multiDataList = base.ZRangeByScore(setId, fromScore, toScore, skip, take); - return multiDataList.ToStringList(); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore) - { - return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromStringScore, toStringScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take) - { - var fromScore = GetLexicalScore(fromStringScore); - var toScore = GetLexicalScore(toStringScore); - return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromScore, toScore, skip, take); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, double fromScore, double toScore) - { - return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, long fromScore, long toScore) - { - return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, double fromScore, double toScore, int? skip, int? take) - { - var multiDataList = base.ZRangeByScoreWithScores(setId, fromScore, toScore, skip, take); - return CreateSortedScoreMap(multiDataList); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, long fromScore, long toScore, int? skip, int? take) - { - var multiDataList = base.ZRangeByScoreWithScores(setId, fromScore, toScore, skip, take); - return CreateSortedScoreMap(multiDataList); - } - - - public List GetRangeFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore) - { - return GetRangeFromSortedSetByHighestScore(setId, fromStringScore, toStringScore, null, null); - } - - public List GetRangeFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take) - { - var fromScore = GetLexicalScore(fromStringScore); - var toScore = GetLexicalScore(toStringScore); - return GetRangeFromSortedSetByHighestScore(setId, fromScore, toScore, skip, take); - } - - public List GetRangeFromSortedSetByHighestScore(string setId, double fromScore, double toScore) - { - return GetRangeFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); - } - - public List GetRangeFromSortedSetByHighestScore(string setId, long fromScore, long toScore) - { - return GetRangeFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); - } - - public List GetRangeFromSortedSetByHighestScore(string setId, double fromScore, double toScore, int? skip, int? take) - { - var multiDataList = base.ZRevRangeByScore(setId, fromScore, toScore, skip, take); - return multiDataList.ToStringList(); - } - - public List GetRangeFromSortedSetByHighestScore(string setId, long fromScore, long toScore, int? skip, int? 
take) - { - var multiDataList = base.ZRevRangeByScore(setId, fromScore, toScore, skip, take); - return multiDataList.ToStringList(); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore) - { - return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromStringScore, toStringScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take) - { - var fromScore = GetLexicalScore(fromStringScore); - var toScore = GetLexicalScore(toStringScore); - return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromScore, toScore, skip, take); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, double fromScore, double toScore) - { - return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, long fromScore, long toScore) - { - return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, double fromScore, double toScore, int? skip, int? take) - { - var multiDataList = base.ZRevRangeByScoreWithScores(setId, fromScore, toScore, skip, take); - return CreateSortedScoreMap(multiDataList); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, long fromScore, long toScore, int? skip, int? take) - { - var multiDataList = base.ZRevRangeByScoreWithScores(setId, fromScore, toScore, skip, take); - return CreateSortedScoreMap(multiDataList); - } - - - - public int RemoveRangeFromSortedSet(string setId, int minRank, int maxRank) - { - return base.ZRemRangeByRank(setId, minRank, maxRank); - } - - public int RemoveRangeFromSortedSetByScore(string setId, double fromScore, double toScore) - { - return base.ZRemRangeByScore(setId, fromScore, toScore); - } - - public int RemoveRangeFromSortedSetByScore(string setId, long fromScore, long toScore) - { - return base.ZRemRangeByScore(setId, fromScore, toScore); - } - - public int GetSortedSetCount(string setId) - { - return base.ZCard(setId); - } - - public int GetSortedSetCount(string setId, string fromStringScore, string toStringScore) - { - var fromScore = GetLexicalScore(fromStringScore); - var toScore = GetLexicalScore(toStringScore); - return GetSortedSetCount(setId, fromScore, toScore); - } - - public int GetSortedSetCount(string setId, double fromScore, double toScore) - { - return base.ZCount(setId, fromScore, toScore); - } - - public int GetSortedSetCount(string setId, long fromScore, long toScore) - { - return base.ZCount(setId, fromScore, toScore); - } - - public double GetItemScoreInSortedSet(string setId, string value) - { - return base.ZScore(setId, value.ToUtf8Bytes()); - } - - public int StoreIntersectFromSortedSets(string intoSetId, params string[] setIds) - { - return base.ZInterStore(intoSetId, setIds); - } - - public int StoreUnionFromSortedSets(string intoSetId, params string[] setIds) - { - return base.ZUnionStore(intoSetId, setIds); - } - } + public partial class RedisClient : IRedisClient + { + public IHasNamed SortedSets { get; set; } + + internal partial class RedisClientSortedSets + : IHasNamed + { + private readonly RedisClient client; + + public RedisClientSortedSets(RedisClient client) + { + this.client = client; + } + + public IRedisSortedSet 
this[string setId] + { + get + { + return new RedisClientSortedSet(client, setId); + } + set + { + var col = this[setId]; + col.Clear(); + col.CopyTo(value.ToArray(), 0); + } + } + } + + public static double GetLexicalScore(string value) + { + if (String.IsNullOrEmpty(value)) + return 0; + + var lexicalValue = 0; + if (value.Length >= 1) + lexicalValue += value[0] * (int)Math.Pow(256, 3); + + if (value.Length >= 2) + lexicalValue += value[1] * (int)Math.Pow(256, 2); + + if (value.Length >= 3) + lexicalValue += value[2] * (int)Math.Pow(256, 1); + + if (value.Length >= 4) + lexicalValue += value[3]; + + return lexicalValue; + } + + public bool AddItemToSortedSet(string setId, string value) + { + return AddItemToSortedSet(setId, value, GetLexicalScore(value)); + } + + public bool AddItemToSortedSet(string setId, string value, double score) + { + return base.ZAdd(setId, score, value.ToUtf8Bytes()) == Success; + } + + public bool AddItemToSortedSet(string setId, string value, long score) + { + return base.ZAdd(setId, score, value.ToUtf8Bytes()) == Success; + } + + public bool AddRangeToSortedSet(string setId, List values, double score) + { + var pipeline = AddRangeToSortedSetPrepareNonFlushed(setId, values, score.ToFastUtf8Bytes()); + pipeline.Flush(); + + return pipeline.ReadAllAsIntsHaveSuccess(); + } + + public bool AddRangeToSortedSet(string setId, List values, long score) + { + var pipeline = AddRangeToSortedSetPrepareNonFlushed(setId, values, score.ToUtf8Bytes()); + pipeline.Flush(); + + return pipeline.ReadAllAsIntsHaveSuccess(); + } + RedisPipelineCommand AddRangeToSortedSetPrepareNonFlushed(string setId, List values, byte[] uScore) + { + var pipeline = CreatePipelineCommand(); + var uSetId = setId.ToUtf8Bytes(); + + foreach (var value in values) + { + pipeline.WriteCommand(Commands.ZAdd, uSetId, uScore, value.ToUtf8Bytes()); + } + return pipeline; + } + + public bool RemoveItemFromSortedSet(string setId, string value) + { + return base.ZRem(setId, value.ToUtf8Bytes()) == Success; + } + + public long RemoveItemsFromSortedSet(string setId, List values) + { + return base.ZRem(setId, values.Map(x => x.ToUtf8Bytes()).ToArray()); + } + + public string PopItemWithLowestScoreFromSortedSet(string setId) + { + //TODO: this should be atomic + var topScoreItemBytes = base.ZRange(setId, FirstElement, 1); + if (topScoreItemBytes.Length == 0) return null; + + base.ZRem(setId, topScoreItemBytes[0]); + return topScoreItemBytes[0].FromUtf8Bytes(); + } + + public string PopItemWithHighestScoreFromSortedSet(string setId) + { + //TODO: this should be atomic + var topScoreItemBytes = base.ZRevRange(setId, FirstElement, 1); + if (topScoreItemBytes.Length == 0) return null; + + base.ZRem(setId, topScoreItemBytes[0]); + return topScoreItemBytes[0].FromUtf8Bytes(); + } + + public bool SortedSetContainsItem(string setId, string value) + { + return base.ZRank(setId, value.ToUtf8Bytes()) != -1; + } + + public double IncrementItemInSortedSet(string setId, string value, double incrementBy) + { + return base.ZIncrBy(setId, incrementBy, value.ToUtf8Bytes()); + } + + public double IncrementItemInSortedSet(string setId, string value, long incrementBy) + { + return base.ZIncrBy(setId, incrementBy, value.ToUtf8Bytes()); + } + + public long GetItemIndexInSortedSet(string setId, string value) + { + return base.ZRank(setId, value.ToUtf8Bytes()); + } + + public long GetItemIndexInSortedSetDesc(string setId, string value) + { + return base.ZRevRank(setId, value.ToUtf8Bytes()); + } + + public List 
GetAllItemsFromSortedSet(string setId) + { + var multiDataList = base.ZRange(setId, FirstElement, LastElement); + return multiDataList.ToStringList(); + } + + public List GetAllItemsFromSortedSetDesc(string setId) + { + var multiDataList = base.ZRevRange(setId, FirstElement, LastElement); + return multiDataList.ToStringList(); + } + + public List GetRangeFromSortedSet(string setId, int fromRank, int toRank) + { + var multiDataList = base.ZRange(setId, fromRank, toRank); + return multiDataList.ToStringList(); + } + + public List GetRangeFromSortedSetDesc(string setId, int fromRank, int toRank) + { + var multiDataList = base.ZRevRange(setId, fromRank, toRank); + return multiDataList.ToStringList(); + } + + public IDictionary GetAllWithScoresFromSortedSet(string setId) + { + var multiDataList = base.ZRangeWithScores(setId, FirstElement, LastElement); + return CreateSortedScoreMap(multiDataList); + } + + public IDictionary GetRangeWithScoresFromSortedSet(string setId, int fromRank, int toRank) + { + var multiDataList = base.ZRangeWithScores(setId, fromRank, toRank); + return CreateSortedScoreMap(multiDataList); + } + + public IDictionary GetRangeWithScoresFromSortedSetDesc(string setId, int fromRank, int toRank) + { + var multiDataList = base.ZRevRangeWithScores(setId, fromRank, toRank); + return CreateSortedScoreMap(multiDataList); + } + + private static IDictionary CreateSortedScoreMap(byte[][] multiDataList) + { + var map = new OrderedDictionary(); + + for (var i = 0; i < multiDataList.Length; i += 2) + { + var key = multiDataList[i].FromUtf8Bytes(); + double value; + Double.TryParse(multiDataList[i + 1].FromUtf8Bytes(), NumberStyles.Any, CultureInfo.InvariantCulture, out value); + map[key] = value; + } + + return map; + } + + + public List GetRangeFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore) + { + return GetRangeFromSortedSetByLowestScore(setId, fromStringScore, toStringScore, null, null); + } + + public List GetRangeFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return GetRangeFromSortedSetByLowestScore(setId, fromScore, toScore, skip, take); + } + + public List GetRangeFromSortedSetByLowestScore(string setId, double fromScore, double toScore) + { + return GetRangeFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); + } + + public List GetRangeFromSortedSetByLowestScore(string setId, long fromScore, long toScore) + { + return GetRangeFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); + } + + public List GetRangeFromSortedSetByLowestScore(string setId, double fromScore, double toScore, int? skip, int? take) + { + var multiDataList = base.ZRangeByScore(setId, fromScore, toScore, skip, take); + return multiDataList.ToStringList(); + } + + public List GetRangeFromSortedSetByLowestScore(string setId, long fromScore, long toScore, int? skip, int? take) + { + var multiDataList = base.ZRangeByScore(setId, fromScore, toScore, skip, take); + return multiDataList.ToStringList(); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore) + { + return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromStringScore, toStringScore, null, null); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? 
take) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromScore, toScore, skip, take); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, double fromScore, double toScore) + { + return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, long fromScore, long toScore) + { + return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, double fromScore, double toScore, int? skip, int? take) + { + var multiDataList = base.ZRangeByScoreWithScores(setId, fromScore, toScore, skip, take); + return CreateSortedScoreMap(multiDataList); + } + + public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, long fromScore, long toScore, int? skip, int? take) + { + var multiDataList = base.ZRangeByScoreWithScores(setId, fromScore, toScore, skip, take); + return CreateSortedScoreMap(multiDataList); + } + + + public List GetRangeFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore) + { + return GetRangeFromSortedSetByHighestScore(setId, fromStringScore, toStringScore, null, null); + } + + public List GetRangeFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return GetRangeFromSortedSetByHighestScore(setId, fromScore, toScore, skip, take); + } + + public List GetRangeFromSortedSetByHighestScore(string setId, double fromScore, double toScore) + { + return GetRangeFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); + } + + public List GetRangeFromSortedSetByHighestScore(string setId, long fromScore, long toScore) + { + return GetRangeFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); + } + + public List GetRangeFromSortedSetByHighestScore(string setId, double fromScore, double toScore, int? skip, int? take) + { + var multiDataList = base.ZRevRangeByScore(setId, fromScore, toScore, skip, take); + return multiDataList.ToStringList(); + } + + public List GetRangeFromSortedSetByHighestScore(string setId, long fromScore, long toScore, int? skip, int? take) + { + var multiDataList = base.ZRevRangeByScore(setId, fromScore, toScore, skip, take); + return multiDataList.ToStringList(); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore) + { + return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromStringScore, toStringScore, null, null); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? 
take) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromScore, toScore, skip, take); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, double fromScore, double toScore) + { + return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, long fromScore, long toScore) + { + return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, double fromScore, double toScore, int? skip, int? take) + { + var multiDataList = base.ZRevRangeByScoreWithScores(setId, fromScore, toScore, skip, take); + return CreateSortedScoreMap(multiDataList); + } + + public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, long fromScore, long toScore, int? skip, int? take) + { + var multiDataList = base.ZRevRangeByScoreWithScores(setId, fromScore, toScore, skip, take); + return CreateSortedScoreMap(multiDataList); + } + + + + public long RemoveRangeFromSortedSet(string setId, int minRank, int maxRank) + { + return base.ZRemRangeByRank(setId, minRank, maxRank); + } + + public long RemoveRangeFromSortedSetByScore(string setId, double fromScore, double toScore) + { + return base.ZRemRangeByScore(setId, fromScore, toScore); + } + + public long RemoveRangeFromSortedSetByScore(string setId, long fromScore, long toScore) + { + return base.ZRemRangeByScore(setId, fromScore, toScore); + } + + public long GetSortedSetCount(string setId) + { + return base.ZCard(setId); + } + + public long GetSortedSetCount(string setId, string fromStringScore, string toStringScore) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return GetSortedSetCount(setId, fromScore, toScore); + } + + public long GetSortedSetCount(string setId, double fromScore, double toScore) + { + return base.ZCount(setId, fromScore, toScore); + } + + public long GetSortedSetCount(string setId, long fromScore, long toScore) + { + return base.ZCount(setId, fromScore, toScore); + } + + public double GetItemScoreInSortedSet(string setId, string value) + { + return base.ZScore(setId, value.ToUtf8Bytes()); + } + + public long StoreIntersectFromSortedSets(string intoSetId, params string[] setIds) + { + return base.ZInterStore(intoSetId, setIds); + } + + public long StoreIntersectFromSortedSets(string intoSetId, string[] setIds, string[] args) + { + return base.ZInterStore(intoSetId, setIds, args); + } + + public long StoreUnionFromSortedSets(string intoSetId, params string[] setIds) + { + return base.ZUnionStore(intoSetId, setIds); + } + + public long StoreUnionFromSortedSets(string intoSetId, string[] setIds, string[] args) + { + return base.ZUnionStore(intoSetId, setIds, args); + } + + private static string GetSearchStart(string start) + { + return start == null + ? "-" + : start.IndexOfAny("[", "(", "-") != 0 + ? "[" + start + : start; + } + + private static string GetSearchEnd(string end) + { + return end == null + ? "+" + : end.IndexOfAny("[", "(", "+") != 0 + ? "[" + end + : end; + } + + public List SearchSortedSet(string setId, string start = null, string end = null, int? skip = null, int? 
take = null) + { + start = GetSearchStart(start); + end = GetSearchEnd(end); + + var ret = base.ZRangeByLex(setId, start, end, skip, take); + return ret.ToStringList(); + } + + public long SearchSortedSetCount(string setId, string start = null, string end = null) + { + return base.ZLexCount(setId, GetSearchStart(start), GetSearchEnd(end)); + } + + public long RemoveRangeFromSortedSetBySearch(string setId, string start = null, string end = null) + { + return base.ZRemRangeByLex(setId, GetSearchStart(start), GetSearchEnd(end)); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientsManagerExtensions.Async.cs b/src/ServiceStack.Redis/RedisClientsManagerExtensions.Async.cs new file mode 100644 index 00000000..123396ac --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientsManagerExtensions.Async.cs @@ -0,0 +1,120 @@ +using ServiceStack.Caching; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + /// + /// Useful wrapper IRedisClientsManager to cut down the boiler plate of most IRedisClient access + /// + public static partial class RedisClientsManagerExtensions + { + ///// + ///// Creates a PubSubServer that uses a background thread to listen and process for + ///// Redis Pub/Sub messages published to the specified channel. + ///// Use optional callbacks to listen for message, error and life-cycle events. + ///// Callbacks can be assigned later, then call Start() for PubSubServer to start listening for messages + ///// + //public static IRedisPubSubServer CreatePubSubServer(this IRedisClientsManager redisManager, + // string channel, + // Action onMessage = null, + // Action onError = null, + // Action onInit = null, + // Action onStart = null, + // Action onStop = null) + //{ + // return new RedisPubSubServer(redisManager, channel) + // { + // OnMessage = onMessage, + // OnError = onError, + // OnInit = onInit, + // OnStart = onStart, + // OnStop = onStop, + // }; + //} + + private static T InvalidAsyncClient(IRedisClientsManager manager, string method) where T : class + => throw new NotSupportedException($"The client returned from '{manager?.GetType().FullName ?? "(null)"}.{method}()' does not implement {typeof(T).Name}"); + + public static ValueTask GetClientAsync(this IRedisClientsManager redisManager, CancellationToken token = default) + { + return redisManager is IRedisClientsManagerAsync asyncManager + ? asyncManager.GetClientAsync(token) + : (redisManager.GetClient() as IRedisClientAsync ?? InvalidAsyncClient(redisManager, nameof(redisManager.GetClient))).AsValueTaskResult(); + } + + public static ValueTask GetReadOnlyClientAsync(this IRedisClientsManager redisManager, CancellationToken token = default) + { + return redisManager is IRedisClientsManagerAsync asyncManager + ? asyncManager.GetReadOnlyClientAsync(token) + : (redisManager.GetReadOnlyClient() as IRedisClientAsync ?? InvalidAsyncClient(redisManager, nameof(redisManager.GetReadOnlyClient))).AsValueTaskResult(); + } + + public static ValueTask GetCacheClientAsync(this IRedisClientsManager redisManager, CancellationToken token = default) + { + return redisManager is IRedisClientsManagerAsync asyncManager + ? asyncManager.GetCacheClientAsync(token) + : (redisManager.GetCacheClient() as ICacheClientAsync ?? 
InvalidAsyncClient(redisManager, nameof(redisManager.GetCacheClient))).AsValueTaskResult(); + } + + public static ValueTask GetReadOnlyCacheClientAsync(this IRedisClientsManager redisManager, CancellationToken token = default) + { + return redisManager is IRedisClientsManagerAsync asyncManager + ? asyncManager.GetReadOnlyCacheClientAsync(token) + : (redisManager.GetReadOnlyCacheClient() as ICacheClientAsync ?? InvalidAsyncClient(redisManager, nameof(redisManager.GetCacheClient))).AsValueTaskResult(); + } + + + public static async ValueTask ExecAsync(this IRedisClientsManager redisManager, Func lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + await lambda(redis).ConfigureAwait(false); + } + + public static async ValueTask ExecAsync(this IRedisClientsManager redisManager, Func> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + return await lambda(redis).ConfigureAwait(false); + } + + //public static void ExecTrans(this IRedisClientsManager redisManager, Action lambda) + //{ + // using (var redis = redisManager.GetClient()) + // using (var trans = redis.CreateTransaction()) + // { + // lambda(trans); + + // trans.Commit(); + // } + //} + + public static async ValueTask ExecAsAsync(this IRedisClientsManager redisManager, Func, ValueTask> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + await lambda(redis.As()).ConfigureAwait(false); + } + + public static async ValueTask ExecAsAsync(this IRedisClientsManager redisManager, Func, ValueTask> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + return await lambda(redis.As()).ConfigureAwait(false); + } + + public static async ValueTask> ExecAsAsync(this IRedisClientsManager redisManager, Func, ValueTask>> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + return await lambda(redis.As()).ConfigureAwait(false); + } + + public static async ValueTask> ExecAsAsync(this IRedisClientsManager redisManager, Func, ValueTask>> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + return await lambda(redis.As()).ConfigureAwait(false); + } + } + +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientsManagerExtensions.cs b/src/ServiceStack.Redis/RedisClientsManagerExtensions.cs index 3a6f07ef..6f62b84b 100644 --- a/src/ServiceStack.Redis/RedisClientsManagerExtensions.cs +++ b/src/ServiceStack.Redis/RedisClientsManagerExtensions.cs @@ -5,10 +5,34 @@ namespace ServiceStack.Redis { /// - /// Useful wrapper IRedisClientsManager to cut down the boiler plat of most IRedisClient access + /// Useful wrapper IRedisClientsManager to cut down the boiler plate of most IRedisClient access /// - public static class RedisClientsManagerExtensions + public static partial class RedisClientsManagerExtensions { + /// + /// Creates a PubSubServer that uses a background thread to listen and process for + /// Redis Pub/Sub messages published to the specified channel. + /// Use optional callbacks to listen for message, error and life-cycle events. 
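Reviewer note, not part of the patch: a sketch of the async manager extensions above in use, assuming `RedisManagerPool` pointing at a local server and example key names. When the manager is not an `IRedisClientsManagerAsync`, `GetClientAsync` falls back to the sync `GetClient()` and requires that client to implement `IRedisClientAsync`, as shown above.

```csharp
using System.Threading.Tasks;
using ServiceStack.Redis;

class AsyncManagerSketch
{
    static async Task Main()
    {
        // Example connection string; RedisManagerPool is one IRedisClientsManager implementation.
        IRedisClientsManager manager = new RedisManagerPool("localhost:6379");

        // GetClientAsync resolves an IRedisClientAsync; ExecAsync/ExecAsAsync above wrap the
        // same resolve-use-dispose pattern for a single lambda.
        await using var redis = await manager.GetClientAsync();
        await redis.SetValueAsync("key:greeting", "hello");
        var greeting = await redis.GetValueAsync("key:greeting");
    }
}
```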
+ /// Callbacks can be assigned later, then call Start() for PubSubServer to start listening for messages + /// + public static IRedisPubSubServer CreatePubSubServer(this IRedisClientsManager redisManager, + string channel, + Action onMessage = null, + Action onError = null, + Action onInit = null, + Action onStart = null, + Action onStop = null) + { + return new RedisPubSubServer(redisManager, channel) + { + OnMessage = onMessage, + OnError = onError, + OnInit = onInit, + OnStart = onStart, + OnStop = onStop, + }; + } + public static void Exec(this IRedisClientsManager redisManager, Action lambda) { using (var redis = redisManager.GetClient()) @@ -71,36 +95,32 @@ public static void ExecTrans(this IRedisClientsManager redisManager, Action(this IRedisClientsManager redisManager, Action> lambda) { using (var redis = redisManager.GetClient()) - using (var typedRedis = redis.GetTypedClient()) { - lambda(typedRedis); + lambda(redis.As()); } } public static T ExecAs(this IRedisClientsManager redisManager, Func, T> lambda) { using (var redis = redisManager.GetClient()) - using (var typedRedis = redis.GetTypedClient()) { - return lambda(typedRedis); + return lambda(redis.As()); } } public static IList ExecAs(this IRedisClientsManager redisManager, Func, IList> lambda) { using (var redis = redisManager.GetClient()) - using (var typedRedis = redis.GetTypedClient()) { - return lambda(typedRedis); + return lambda(redis.As()); } } public static List ExecAs(this IRedisClientsManager redisManager, Func, List> lambda) { using (var redis = redisManager.GetClient()) - using (var typedRedis = redis.GetTypedClient()) { - return lambda(typedRedis); + return lambda(redis.As()); } } } diff --git a/src/ServiceStack.Redis/RedisConfig.cs b/src/ServiceStack.Redis/RedisConfig.cs new file mode 100644 index 00000000..da28ffb5 --- /dev/null +++ b/src/ServiceStack.Redis/RedisConfig.cs @@ -0,0 +1,156 @@ +using System; +using System.Net.Security; +using System.Threading; + +namespace ServiceStack.Redis +{ + public class RedisConfig + { + //redis-server defaults: + public const long DefaultDb = 0; + public const int DefaultPort = 6379; + public const int DefaultPortSsl = 6380; + public const int DefaultPortSentinel = 26379; + public const string DefaultHost = "localhost"; + + /// + /// Factory used to Create `RedisClient` instances + /// + public static Func ClientFactory = c => + { + Interlocked.Increment(ref RedisState.TotalClientsCreated); + return new RedisClient(c); + }; + + /// + /// The default RedisClient Socket ConnectTimeout (default -1, None) + /// + public static int DefaultConnectTimeout = -1; + + /// + /// The default RedisClient Socket SendTimeout (default -1, None) + /// + public static int DefaultSendTimeout = -1; + + /// + /// The default RedisClient Socket ReceiveTimeout (default -1, None) + /// + public static int DefaultReceiveTimeout = -1; + + /// + /// Default Idle TimeOut before a connection is considered to be stale (default 240 secs) + /// + public static int DefaultIdleTimeOutSecs = 240; + + /// + /// The default RetryTimeout for auto retry of failed operations (default 10,000ms) + /// + public static int DefaultRetryTimeout = 10 * 1000; + + /// + /// Default Max Pool Size for Pooled Redis Client Managers (default none) + /// + public static int? 
DefaultMaxPoolSize; + + /// + /// The default pool size multiplier if no pool size is specified (default 50) + /// + public static int DefaultPoolSizeMultiplier = 50; + + /// + /// The BackOff multiplier failed Auto Retries starts from (default 10ms) + /// + public static int BackOffMultiplier = 10; + + /// + /// The Byte Buffer Size to combine Redis Operations within (1450 bytes) + /// + public static int BufferLength => ServiceStack.Text.Pools.BufferPool.BUFFER_LENGTH; + + /// + /// The Byte Buffer Size for Operations to use a byte buffer pool (default 500kb) + /// + public static int BufferPoolMaxSize = 500000; + + /// + /// Batch size of keys to include in a single Redis Command (e.g. DEL k1 k2...) + /// + public static int CommandKeysBatchSize = 10000; + + /// + /// Whether Connections to Master hosts should be verified they're still master instances (default true) + /// + public static bool VerifyMasterConnections = true; + + /// + /// Whether to retry re-connecting on same connection if not a master instance (default true) + /// For Managed Services (e.g. AWS ElastiCache) which eventually restores master instances on same host + /// + public static bool RetryReconnectOnFailedMasters = true; + + /// + /// The ConnectTimeout on clients used to find the next available host (default 200ms) + /// + public static int HostLookupTimeoutMs = 200; + + /// + /// Skip ServerVersion Checks by specifying Min Version number, e.g: 2.8.12 => 2812, 2.9.1 => 2910 + /// + public static int? AssumeServerVersion; + + /// + /// How long to hold deactivated clients for before disposing their connection (default 0 seconds) + /// Dispose of deactivated Clients immediately with TimeSpan.Zero + /// + public static TimeSpan DeactivatedClientsExpiry = TimeSpan.Zero; + + /// + /// Whether Debug Logging should log detailed Redis operations (default false) + /// + public static bool EnableVerboseLogging = false; + + [Obsolete("Use EnableVerboseLogging")] + public static bool DisableVerboseLogging + { + get => !EnableVerboseLogging; + set => EnableVerboseLogging = !value; + } + + //Example at: http://msdn.microsoft.com/en-us/library/office/dd633677(v=exchg.80).aspx + public static LocalCertificateSelectionCallback CertificateSelectionCallback { get; set; } + public static RemoteCertificateValidationCallback CertificateValidationCallback { get; set; } + + /// + /// Assert all access using pooled RedisClient instance should be limited to same thread. + /// Captures StackTrace so is very slow, use only for debugging connection issues. 
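Reviewer note, not part of the patch: a sketch of tuning the `RedisConfig` statics introduced above at application startup, before any clients are created. The values shown are arbitrary examples, not recommendations.

```csharp
using System;
using ServiceStack.Redis;

static class RedisBootstrap
{
    public static void Configure()
    {
        RedisConfig.DefaultConnectTimeout = 5_000;            // ms; -1 means no timeout
        RedisConfig.DefaultMaxPoolSize = 100;                 // caps pooled client managers
        RedisConfig.VerifyMasterConnections = true;           // re-verify role on master connections
        RedisConfig.DeactivatedClientsExpiry = TimeSpan.Zero; // dispose deactivated clients immediately

        // RedisConfig.Reset() restores these (and RedisStats) to their defaults.
    }
}
```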
+ /// + public static bool AssertAccessOnlyOnSameThread = false; + + /// + /// Resets Redis Config and Redis Stats back to default values + /// + public static void Reset() + { + RedisStats.Reset(); + + DefaultConnectTimeout = -1; + DefaultSendTimeout = -1; + DefaultReceiveTimeout = -1; + DefaultRetryTimeout = 10 * 1000; + DefaultIdleTimeOutSecs = 240; + DefaultMaxPoolSize = null; + BackOffMultiplier = 10; + BufferPoolMaxSize = 500000; + CommandKeysBatchSize = 10000; + VerifyMasterConnections = true; + RetryReconnectOnFailedMasters = true; + HostLookupTimeoutMs = 200; + AssumeServerVersion = null; + DeactivatedClientsExpiry = TimeSpan.Zero; + EnableVerboseLogging = false; + CertificateSelectionCallback = null; + CertificateValidationCallback = null; + AssertAccessOnlyOnSameThread = false; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisDataExtensions.cs b/src/ServiceStack.Redis/RedisDataExtensions.cs new file mode 100644 index 00000000..5ca8c2fd --- /dev/null +++ b/src/ServiceStack.Redis/RedisDataExtensions.cs @@ -0,0 +1,47 @@ +using System.Collections.Generic; +using System.Globalization; + +namespace ServiceStack.Redis +{ + public static class RedisDataExtensions + { + public static RedisText ToRedisText(this RedisData data) + { + if (data == null) return null; //In Transaction + + var to = new RedisText(); + + if (data.Data != null) + to.Text = data.Data.FromUtf8Bytes(); + + if (data.Children != null) + to.Children = data.Children.ConvertAll(x => x.ToRedisText()); + + return to; + } + + public static double ToDouble(this RedisData data) + => double.Parse(data.Data.FromUtf8Bytes(), + NumberStyles.Float, + CultureInfo.InvariantCulture); + + public static long ToInt64(this RedisData data) + => long.Parse(data.Data.FromUtf8Bytes(), + NumberStyles.Integer, + CultureInfo.InvariantCulture); + + public static string GetResult(this RedisText from) => from.Text; + + public static T GetResult(this RedisText from) => from.Text.FromJson(); + + public static List GetResults(this RedisText from) + => from.Children == null + ? new List() + : from.Children.ConvertAll(x => x.Text); + + public static List GetResults(this RedisText from) + => from.Children == null + ? 
new List() + : from.Children.ConvertAll(x => x.Text.FromJson()); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisDataInfoExtensions.cs b/src/ServiceStack.Redis/RedisDataInfoExtensions.cs new file mode 100644 index 00000000..fcb20043 --- /dev/null +++ b/src/ServiceStack.Redis/RedisDataInfoExtensions.cs @@ -0,0 +1,75 @@ +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Linq; + +namespace ServiceStack.Redis +{ + public static class RedisDataInfoExtensions + { + public static String ToJsonInfo(this RedisText redisText) + { + var source = redisText.GetResult(); + return Parse(source); + } + + #region Private + + private static String Parse(String source) + { + var result = new Dictionary>(); + var section = new Dictionary(); + + var rows = SplitRows(source); + + foreach (var row in rows) + { + if (row.IndexOf("#", StringComparison.Ordinal) == 0) + { + var name = ParseSection(row); + section = new Dictionary(); + result.Add(name, section); + } + else + { + var pair = ParseKeyValue(row); + if (pair.HasValue) + { + section.Add(pair.Value.Key, pair.Value.Value); + } + } + } + + return JsonSerializer.SerializeToString(result); + } + + private static IEnumerable SplitRows(String source) + { + return source.Split(new[] { "\r\n" }, StringSplitOptions.None).Where(n => !String.IsNullOrWhiteSpace(n)); + } + + private static String ParseSection(String source) + { + return (source.IndexOf("#", StringComparison.Ordinal) == 0) + ? source.Trim('#').Trim() + : String.Empty; + } + + private static KeyValuePair? ParseKeyValue(String source) + { + KeyValuePair? result = null; + + var devider = source.IndexOf(":", StringComparison.Ordinal); + if (devider > 0) + { + var name = source.Substring(0, devider); + var value = source.Substring(devider + 1); + result = new KeyValuePair(name.Trim(), value.Trim()); + } + + return result; + } + + #endregion Private + } +} diff --git a/src/ServiceStack.Redis/RedisEndpoint.cs b/src/ServiceStack.Redis/RedisEndpoint.cs index d3a072e8..b43da7da 100644 --- a/src/ServiceStack.Redis/RedisEndpoint.cs +++ b/src/ServiceStack.Redis/RedisEndpoint.cs @@ -1,19 +1,131 @@ -using ServiceStack.Common.Web; +using System; +using System.Collections.Generic; +using System.ComponentModel; +using System.Security.Authentication; +using System.Text; +using ServiceStack.IO; +using ServiceStack.Text; namespace ServiceStack.Redis { - public class RedisEndPoint : EndPoint + public class RedisEndpoint : IEndpoint { - public RedisEndPoint(string host, int port) : base(host, port) + public RedisEndpoint() { + Host = RedisConfig.DefaultHost; + Port = RedisConfig.DefaultPort; + Db = RedisConfig.DefaultDb; + + ConnectTimeout = RedisConfig.DefaultConnectTimeout; + SendTimeout = RedisConfig.DefaultSendTimeout; + ReceiveTimeout = RedisConfig.DefaultReceiveTimeout; + RetryTimeout = RedisConfig.DefaultRetryTimeout; + IdleTimeOutSecs = RedisConfig.DefaultIdleTimeOutSecs; } - public RedisEndPoint(string host, int port, string password) : this(host,port) + public RedisEndpoint(string host, int port, string password = null, long db = RedisConfig.DefaultDb) + : this() { + this.Host = host; + this.Port = port; this.Password = password; + this.Db = db; } + public string Host { get; set; } + public int Port { get; set; } + public bool Ssl { get; set; } + public SslProtocols? 
SslProtocols {get; set;} + public int ConnectTimeout { get; set; } + public int SendTimeout { get; set; } + public int ReceiveTimeout { get; set; } + public int RetryTimeout { get; set; } + public int IdleTimeOutSecs { get; set; } + public long Db { get; set; } + public string Client { get; set; } public string Password { get; set; } public bool RequiresAuth { get { return !string.IsNullOrEmpty(Password); } } + public string NamespacePrefix { get; set; } + + public override string ToString() + { + var sb = StringBuilderCache.Allocate(); + sb.AppendFormat("{0}:{1}", Host, Port); + + var args = new List(); + if (Client != null) + args.Add("Client=" + Client); + if (Password != null) + args.Add("Password=" + Password.UrlEncode()); + if (Db != RedisConfig.DefaultDb) + args.Add("Db=" + Db); + if (Ssl) + args.Add("Ssl=true"); + if (SslProtocols != null) + args.Add("SslProtocols=" + SslProtocols.ToString()); + if (ConnectTimeout != RedisConfig.DefaultConnectTimeout) + args.Add("ConnectTimeout=" + ConnectTimeout); + if (SendTimeout != RedisConfig.DefaultSendTimeout) + args.Add("SendTimeout=" + SendTimeout); + if (ReceiveTimeout != RedisConfig.DefaultReceiveTimeout) + args.Add("ReceiveTimeout=" + ReceiveTimeout); + if (RetryTimeout != RedisConfig.DefaultRetryTimeout) + args.Add("RetryTimeout=" + RetryTimeout); + if (IdleTimeOutSecs != RedisConfig.DefaultIdleTimeOutSecs) + args.Add("IdleTimeOutSecs=" + IdleTimeOutSecs); + if (NamespacePrefix != null) + args.Add("NamespacePrefix=" + NamespacePrefix.UrlEncode()); + + if (args.Count > 0) + sb.Append("?").Append(string.Join("&", args)); + + return StringBuilderCache.ReturnAndFree(sb); + } + + protected bool Equals(RedisEndpoint other) + { + return string.Equals(Host, other.Host) + && Port == other.Port + && Ssl.Equals(other.Ssl) + && SslProtocols.Equals(other.SslProtocols) + && ConnectTimeout == other.ConnectTimeout + && SendTimeout == other.SendTimeout + && ReceiveTimeout == other.ReceiveTimeout + && RetryTimeout == other.RetryTimeout + && IdleTimeOutSecs == other.IdleTimeOutSecs + && Db == other.Db + && string.Equals(Client, other.Client) + && string.Equals(Password, other.Password) + && string.Equals(NamespacePrefix, other.NamespacePrefix); + } + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != this.GetType()) return false; + return Equals((RedisEndpoint)obj); + } + + public override int GetHashCode() + { + unchecked + { + var hashCode = (Host != null ? Host.GetHashCode() : 0); + hashCode = (hashCode * 397) ^ Port; + hashCode = (hashCode * 397) ^ Ssl.GetHashCode(); + hashCode = (hashCode * 397) ^ SslProtocols.GetHashCode(); + hashCode = (hashCode * 397) ^ ConnectTimeout; + hashCode = (hashCode * 397) ^ SendTimeout; + hashCode = (hashCode * 397) ^ ReceiveTimeout; + hashCode = (hashCode * 397) ^ RetryTimeout; + hashCode = (hashCode * 397) ^ IdleTimeOutSecs; + hashCode = (hashCode * 397) ^ Db.GetHashCode(); + hashCode = (hashCode * 397) ^ (Client != null ? Client.GetHashCode() : 0); + hashCode = (hashCode * 397) ^ (Password != null ? Password.GetHashCode() : 0); + hashCode = (hashCode * 397) ^ (NamespacePrefix != null ? 
NamespacePrefix.GetHashCode() : 0); + return hashCode; + } + } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisExtensions.cs b/src/ServiceStack.Redis/RedisExtensions.cs index 682703e9..b4ba79f7 100644 --- a/src/ServiceStack.Redis/RedisExtensions.cs +++ b/src/ServiceStack.Redis/RedisExtensions.cs @@ -1,70 +1,123 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; using System.Globalization; +using System.Linq; using System.Net.Sockets; -using System.Text; -using ServiceStack.Common.Web; -using ServiceStack.DesignPatterns.Model; +using System.Security.Authentication; +using ServiceStack.Model; using ServiceStack.Text; namespace ServiceStack.Redis { - internal static class RedisExtensions + public static class RedisExtensions { - public static List ToRedisEndPoints(this IEnumerable hosts) + public static List ToRedisEndPoints(this IEnumerable hosts) { - if (hosts == null) return new List(); - - var redisEndpoints = new List(); - foreach (var host in hosts) - { - RedisEndPoint endpoint; - string[] hostParts; - if (host.Contains("@")) - { - hostParts = host.SplitOnLast('@'); - var password = hostParts[0]; - hostParts = hostParts[1].Split(':'); - endpoint = GetRedisEndPoint(hostParts); - endpoint.Password = password; - } - else - { - hostParts = host.Split(':'); - endpoint = GetRedisEndPoint(hostParts); - } - redisEndpoints.Add(endpoint); - } - return redisEndpoints; + return hosts == null + ? new List() + : hosts.Select(x => ToRedisEndpoint(x)).ToList(); } - private static RedisEndPoint GetRedisEndPoint(string[] hostParts) + public static RedisEndpoint ToRedisEndpoint(this string connectionString, int? defaultPort = null) { - const int hostOrIpAddressIndex = 0; - const int portIndex = 1; + if (connectionString == null) + throw new ArgumentNullException("connectionString"); + if (connectionString.StartsWith("redis://")) + connectionString = connectionString.Substring("redis://".Length); + + var domainParts = connectionString.SplitOnLast('@'); + var qsParts = domainParts.Last().SplitOnFirst('?'); + var hostParts = qsParts[0].SplitOnLast(':'); + var useDefaultPort = true; + var port = defaultPort.GetValueOrDefault(RedisConfig.DefaultPort); + if (hostParts.Length > 1) + { + port = int.Parse(hostParts[1]); + useDefaultPort = false; + } + var endpoint = new RedisEndpoint(hostParts[0], port); + if (domainParts.Length > 1) + { + var authParts = domainParts[0].SplitOnFirst(':'); + if (authParts.Length > 1) + endpoint.Client = authParts[0]; - if (hostParts.Length == 0) - throw new ArgumentException("'{0}' is not a valid Host or IP Address: e.g. '127.0.0.0[:11211]'"); + endpoint.Password = authParts.Last(); + } - var port = (hostParts.Length == 1) - ? RedisNativeClient.DefaultPort - : Int32.Parse(hostParts[portIndex]); + if (qsParts.Length > 1) + { + var qsParams = qsParts[1].Split('&'); + foreach (var param in qsParams) + { + var entry = param.Split('='); + var value = entry.Length > 1 ? 
entry[1].UrlDecode() : null; + if (value == null) continue; + + var name = entry[0].ToLower(); + switch (name) + { + case "db": + endpoint.Db = int.Parse(value); + break; + case "ssl": + endpoint.Ssl = bool.Parse(value); + if (useDefaultPort) + endpoint.Port = RedisConfig.DefaultPortSsl; + break; + case "sslprotocols": + SslProtocols protocols; + value = value?.Replace("|", ","); + if (!Enum.TryParse(value, true, out protocols)) throw new ArgumentOutOfRangeException("Keyword '" + name + "' requires an SslProtocol value (multiple values separated by '|')."); + endpoint.SslProtocols = protocols; + break; + case "client": + endpoint.Client = value; + break; + case "password": + endpoint.Password = value; + break; + case "namespaceprefix": + endpoint.NamespacePrefix = value; + break; + case "connecttimeout": + endpoint.ConnectTimeout = int.Parse(value); + break; + case "sendtimeout": + endpoint.SendTimeout = int.Parse(value); + break; + case "receivetimeout": + endpoint.ReceiveTimeout = int.Parse(value); + break; + case "retrytimeout": + endpoint.RetryTimeout = int.Parse(value); + break; + case "idletimeout": + case "idletimeoutsecs": + endpoint.IdleTimeOutSecs = int.Parse(value); + break; + } + } + } - return new RedisEndPoint(hostParts[hostOrIpAddressIndex], port); + return endpoint; } + } + internal static class RedisExtensionsInternal + { public static bool IsConnected(this Socket socket) { try @@ -101,29 +154,64 @@ public static List ToStringList(this byte[][] multiDataList) return results; } + public static string[] ToStringArray(this byte[][] multiDataList) + { + if (multiDataList == null) + return TypeConstants.EmptyStringArray; + + var to = new string[multiDataList.Length]; + for (int i = 0; i < multiDataList.Length; i++) + { + to[i] = multiDataList[i].FromUtf8Bytes(); + } + return to; + } + + public static Dictionary ToStringDictionary(this byte[][] multiDataList) + { + if (multiDataList == null) + return TypeConstants.EmptyStringDictionary; + + var map = new Dictionary(); + + for (var i = 0; i < multiDataList.Length; i += 2) + { + var key = multiDataList[i].FromUtf8Bytes(); + map[key] = multiDataList[i + 1].FromUtf8Bytes(); + } + + return map; + } + + private static readonly NumberFormatInfo DoubleFormatProvider = new NumberFormatInfo + { + PositiveInfinitySymbol = "+inf", + NegativeInfinitySymbol = "-inf" + }; + public static byte[] ToFastUtf8Bytes(this double value) { - return FastToUtf8Bytes(value.ToString("R", CultureInfo.InvariantCulture)); + return FastToUtf8Bytes(value.ToString("R", DoubleFormatProvider)); } private static byte[] FastToUtf8Bytes(string strVal) { var bytes = new byte[strVal.Length]; for (var i = 0; i < strVal.Length; i++) - bytes[i] = (byte) strVal[i]; + bytes[i] = (byte)strVal[i]; return bytes; } - public static byte[][] ToMultiByteArray(this string[] args) - { - var byteArgs = new byte[args.Length][]; - for (var i = 0; i < args.Length; ++i) - byteArgs[i] = args[i].ToUtf8Bytes(); - return byteArgs; - } + public static byte[][] ToMultiByteArray(this string[] args) + { + var byteArgs = new byte[args.Length][]; + for (var i = 0; i < args.Length; ++i) + byteArgs[i] = args[i].ToUtf8Bytes(); + return byteArgs; + } - public static byte[][] PrependByteArray(this byte[][] args, byte[] valueToPrepend) + public static byte[][] PrependByteArray(this byte[][] args, byte[] valueToPrepend) { var newArgs = new byte[args.Length + 1][]; newArgs[0] = valueToPrepend; @@ -133,7 +221,7 @@ public static byte[][] PrependByteArray(this byte[][] args, byte[] valueToPrepe return 
newArgs; } - public static byte[][] PrependInt(this byte[][] args, int valueToPrepend) + public static byte[][] PrependInt(this byte[][] args, int valueToPrepend) { return args.PrependByteArray(valueToPrepend.ToUtf8Bytes()); } diff --git a/src/ServiceStack.Redis/RedisLock.Async.cs b/src/ServiceStack.Redis/RedisLock.Async.cs new file mode 100644 index 00000000..8466c11f --- /dev/null +++ b/src/ServiceStack.Redis/RedisLock.Async.cs @@ -0,0 +1,93 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Redis.Internal; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + public partial class RedisLock + : IAsyncDisposable + { + internal static ValueTask CreateAsync(IRedisClientAsync redisClient, string key, + TimeSpan? timeOut = default, CancellationToken token = default) + { + var obj = new RedisLock(redisClient, key); + return obj.AcquireAsync(timeOut, token).Await(obj); + } + + // async version of ExecUtils.RetryUntilTrue + private static async ValueTask RetryUntilTrue(Func> action, + TimeSpan? timeOut = null, CancellationToken token = default) + { + var i = 0; + var firstAttempt = DateTime.UtcNow; + + while (timeOut == null || DateTime.UtcNow - firstAttempt < timeOut.Value) + { + token.ThrowIfCancellationRequested(); + i++; + if (await action(token).ConfigureAwait(false)) + { + return; + } + await Task.Delay(ExecUtils.CalculateFullJitterBackOffDelay(i)).ConfigureAwait(false); + } + + throw new TimeoutException($"Exceeded timeout of {timeOut.Value}"); + } + + + private async ValueTask AcquireAsync(TimeSpan? timeOut, CancellationToken token) + { + var redisClient = (IRedisClientAsync)untypedClient; + await RetryUntilTrue( // .ConfigureAwait(false) is below + async ct => + { + //This pattern is taken from the redis command for SETNX http://redis.io/commands/setnx + + //Calculate a unix time for when the lock should expire + var realSpan = timeOut ?? new TimeSpan(365, 0, 0, 0); //if nothing is passed in the timeout hold for a year + var expireTime = DateTime.UtcNow.Add(realSpan); + var lockString = (expireTime.ToUnixTimeMs() + 1).ToString(); + + //Try to set the lock, if it does not exist this will succeed and the lock is obtained + var nx = await redisClient.SetValueIfNotExistsAsync(key, lockString, token: ct).ConfigureAwait(false); + if (nx) + return true; + + //If we've gotten here then a key for the lock is present. This could be because the lock is + //correctly acquired or it could be because a client that had acquired the lock crashed (or didn't release it properly). + //Therefore we need to get the value of the lock to see when it should expire + + await redisClient.WatchAsync(new[] { key }, ct).ConfigureAwait(false); + var lockExpireString = await redisClient.GetValueAsync(key, ct).ConfigureAwait(false); + if (!long.TryParse(lockExpireString, out var lockExpireTime)) + { + await redisClient.UnWatchAsync(ct).ConfigureAwait(false); // since the client is scoped externally + return false; + } + + //If the expire time is greater than the current time then we can't let the lock go yet + if (lockExpireTime > DateTime.UtcNow.ToUnixTimeMs()) + { + await redisClient.UnWatchAsync(ct).ConfigureAwait(false); // since the client is scoped externally + return false; + } + + //If the expire time is less than the current time then it wasn't released properly and we can attempt to + //acquire the lock. 
The above call to Watch(_lockKey) enrolled the key in monitoring, so if it changes + //before we call Commit() below, the Commit will fail and return false, which means that another thread + //was able to acquire the lock before we finished processing. + await using var trans = await redisClient.CreateTransactionAsync(ct).ConfigureAwait(false); + trans.QueueCommand(r => r.SetValueAsync(key, lockString)); + return await trans.CommitAsync(ct).ConfigureAwait(false); //returns false if Transaction failed + }, + timeOut, token + ).ConfigureAwait(false); + } + + ValueTask IAsyncDisposable.DisposeAsync() + => new ValueTask(((IRedisClientAsync)untypedClient).RemoveAsync(key)); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisLock.cs b/src/ServiceStack.Redis/RedisLock.cs index 26c4e59f..abf3a9d9 100644 --- a/src/ServiceStack.Redis/RedisLock.cs +++ b/src/ServiceStack.Redis/RedisLock.cs @@ -1,58 +1,74 @@ -using System; -using ServiceStack.Common; using ServiceStack.Text; +using System; namespace ServiceStack.Redis { - public class RedisLock - : IDisposable - { - private readonly RedisClient redisClient; - private readonly string key; - - public RedisLock(RedisClient redisClient, string key, TimeSpan? timeOut) - { - this.redisClient = redisClient; - this.key = key; - - ExecExtensions.RetryUntilTrue( - () => - { + public partial class RedisLock + : IDisposable + { + private readonly object untypedClient; + private readonly string key; + + private RedisLock(object redisClient, string key) + { + this.untypedClient = redisClient; + this.key = key; + } + + public RedisLock(IRedisClient redisClient, string key, TimeSpan? timeOut) + : this(redisClient, key) + { + ExecUtils.RetryUntilTrue( + () => + { //This pattern is taken from the redis command for SETNX http://redis.io/commands/setnx - + //Calculate a unix time for when the lock should expire - TimeSpan realSpan = timeOut ?? new TimeSpan(365, 0, 0, 0); //if nothing is passed in the timeout hold for a year - DateTime expireTime = DateTime.UtcNow.Add(realSpan); - string lockString = (expireTime.ToUnixTimeMs() + 1).ToString(); - + var realSpan = timeOut ?? new TimeSpan(365, 0, 0, 0); //if nothing is passed in the timeout hold for a year + var expireTime = DateTime.UtcNow.Add(realSpan); + var lockString = (expireTime.ToUnixTimeMs() + 1).ToString(); + //Try to set the lock, if it does not exist this will succeed and the lock is obtained - var nx = redisClient.SetEntryIfNotExists(key, lockString); + var nx = redisClient.SetValueIfNotExists(key, lockString); if (nx) return true; //If we've gotten here then a key for the lock is present. This could be because the lock is //correctly acquired or it could be because a client that had acquired the lock crashed (or didn't release it properly). 
//Therefore we need to get the value of the lock to see when it should expire - string lockExpireString = redisClient.Get(key); - long lockExpireTime; - if (!long.TryParse(lockExpireString, out lockExpireTime)) + + redisClient.Watch(key); + var lockExpireString = redisClient.Get(key); + if (!long.TryParse(lockExpireString, out var lockExpireTime)) + { + redisClient.UnWatch(); // since the client is scoped externally return false; + } + //If the expire time is greater than the current time then we can't let the lock go yet if (lockExpireTime > DateTime.UtcNow.ToUnixTimeMs()) + { + redisClient.UnWatch(); // since the client is scoped externally return false; + } //If the expire time is less than the current time then it wasn't released properly and we can attempt to - //acquire the lock. This is done by setting the lock to our timeout string AND checking to make sure - //that what is returned is the old timeout string in order to account for a possible race condition. - return redisClient.GetAndSetEntry(key, lockString) == lockExpireString; - }, - timeOut - ); - } - - public void Dispose() - { - redisClient.Remove(key); - } - } + //acquire the lock. The above call to Watch(_lockKey) enrolled the key in monitoring, so if it changes + //before we call Commit() below, the Commit will fail and return false, which means that another thread + //was able to acquire the lock before we finished processing. + using (var trans = redisClient.CreateTransaction()) // we started the "Watch" above; this tx will succeed if the value has not moved + { + trans.QueueCommand(r => r.Set(key, lockString)); + return trans.Commit(); //returns false if Transaction failed + } + }, + timeOut + ); + } + + public void Dispose() + { + ((IRedisClient)untypedClient).Remove(key); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisManagerPool.Async.cs b/src/ServiceStack.Redis/RedisManagerPool.Async.cs new file mode 100644 index 00000000..25406123 --- /dev/null +++ b/src/ServiceStack.Redis/RedisManagerPool.Async.cs @@ -0,0 +1,33 @@ +//Copyright (c) ServiceStack, Inc. All Rights Reserved. +//License: https://raw.github.com/ServiceStack/ServiceStack/master/license.txt + +using ServiceStack.Caching; +using ServiceStack.Redis.Internal; +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + public partial class RedisManagerPool + : IRedisClientsManagerAsync + { + ValueTask IRedisClientsManagerAsync.GetCacheClientAsync(CancellationToken token) + => new RedisClientManagerCacheClient(this).AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetClientAsync(CancellationToken token) + => GetClient(true).AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyCacheClientAsync(CancellationToken token) + => new RedisClientManagerCacheClient(this) { ReadOnly = true }.AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyClientAsync(CancellationToken token) + => GetClient(true).AsValueTaskResult(); + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisManagerPool.cs b/src/ServiceStack.Redis/RedisManagerPool.cs new file mode 100644 index 00000000..6394f037 --- /dev/null +++ b/src/ServiceStack.Redis/RedisManagerPool.cs @@ -0,0 +1,451 @@ +//Copyright (c) ServiceStack, Inc. All Rights Reserved. 
+//License: https://raw.github.com/ServiceStack/ServiceStack/master/license.txt + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using ServiceStack.Caching; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + /// + /// Configuration class for the RedisManagerPool + /// + public class RedisPoolConfig + { + /// + /// Default pool size used by every new instance of . (default: 40) + /// + public static int DefaultMaxPoolSize = 40; + + public RedisPoolConfig() + { + // maybe a bit overkill? could be deprecated if you add max int on RedisManagerPool + MaxPoolSize = RedisConfig.DefaultMaxPoolSize ?? DefaultMaxPoolSize; + } + + /// + /// Maximum amount of s created by the . + /// + public int MaxPoolSize { get; set; } + } + + /// + /// Provides thread-safe pooling of redis client connections. All connections are treated as read and write hosts. + /// + public partial class RedisManagerPool + : IRedisClientsManager, IRedisFailover, IHandleClientDispose, IHasRedisResolver, IRedisClientCacheManager + { + private static readonly ILog Log = LogManager.GetLogger(typeof(RedisManagerPool)); + + private const string PoolTimeoutError = + "Redis Timeout expired. The timeout period elapsed prior to obtaining a connection from the pool. This may have occurred because all pooled connections were in use."; + + public int RecheckPoolAfterMs = 100; + + public List> OnFailover { get; private set; } + + private readonly RedisClient[] clients; + protected int poolIndex; + + protected int RedisClientCounter = 0; + + public Func ClientFactory { get; set; } + + public Action ConnectionFilter { get; set; } + + public IRedisResolver RedisResolver { get; set; } + + public int MaxPoolSize { get; private set; } + + public bool AssertAccessOnlyOnSameThread { get; set; } + + public RedisManagerPool() : this(RedisConfig.DefaultHost) { } + public RedisManagerPool(string host) : this(new[] { host }) { } + public RedisManagerPool(string host, RedisPoolConfig config) : this(new[] { host }, config) { } + public RedisManagerPool(IEnumerable hosts) : this(hosts, null) { } + + public RedisManagerPool(IEnumerable hosts, RedisPoolConfig config) + { + if (hosts == null) + throw new ArgumentNullException(nameof(hosts)); + + RedisResolver = new RedisResolver(hosts, null); + + if (config == null) + config = new RedisPoolConfig(); + + this.OnFailover = new List>(); + + this.MaxPoolSize = config.MaxPoolSize; + + clients = new RedisClient[MaxPoolSize]; + poolIndex = 0; + + this.AssertAccessOnlyOnSameThread = RedisConfig.AssertAccessOnlyOnSameThread; + + JsConfig.InitStatics(); + } + + public void FailoverTo(params string[] readWriteHosts) + { + Interlocked.Increment(ref RedisState.TotalFailovers); + + Log.Info($"FailoverTo: {string.Join(",", readWriteHosts)} Total: {RedisState.TotalFailovers}"); + + lock (clients) + { + for (var i = 0; i < clients.Length; i++) + { + var redis = clients[i]; + if (redis != null) + RedisState.DeactivateClient(redis); + + clients[i] = null; + } + RedisResolver.ResetMasters(readWriteHosts); + } + + if (this.OnFailover != null) + { + foreach (var callback in OnFailover) + { + try + { + callback(this); + } + catch (Exception ex) + { + Log.Error("Error firing OnFailover callback(): ", ex); + } + } + } + } + + public void FailoverTo(IEnumerable readWriteHosts, IEnumerable readOnlyHosts) + { + FailoverTo(readWriteHosts.ToArray()); //only use readWriteHosts + } + + /// + /// Returns a Read/Write client (The default) 
using the hosts defined in ReadWriteHosts + /// + /// + public IRedisClient GetClient() => GetClient(false); + private RedisClient GetClient(bool forAsync) + { + try + { + var inactivePoolIndex = -1; + lock (clients) + { + AssertValidPool(); + + //-1 when no available clients otherwise index of reservedSlot or existing Client + inactivePoolIndex = GetInActiveClient(out var inActiveClient); + + //inActiveClient != null only for Valid InActive Clients + if (inActiveClient != null) + { + poolIndex++; + inActiveClient.Activate(); + + return !AssertAccessOnlyOnSameThread + ? inActiveClient + : inActiveClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); + } + } + + //Reaches here when there's no Valid InActive Clients + try + { + //inactivePoolIndex == -1 || index of reservedSlot || index of invalid client + var existingClient = inactivePoolIndex >= 0 && inactivePoolIndex < clients.Length + ? clients[inactivePoolIndex] + : null; + + if (existingClient != null && existingClient != reservedSlot && existingClient.HadExceptions) + { + RedisState.DeactivateClient(existingClient); + } + + var newClient = InitNewClient(RedisResolver.CreateMasterClient(Math.Max(inactivePoolIndex, 0))); + + //Put all blocking I/O or potential Exceptions before lock + lock (clients) + { + //Create new client outside of pool when max pool size exceeded + //Reverting free-slot not needed when -1 since slow wasn't reserved or + //when existingClient changed (failover) since no longer reserved + var stillReserved = inactivePoolIndex >= 0 && inactivePoolIndex < clients.Length && + clients[inactivePoolIndex] == existingClient; + if (inactivePoolIndex == -1 || !stillReserved) + { + if (Log.IsDebugEnabled) + Log.Debug($"POOL clients[inactivePoolIndex] != existingClient: {(!stillReserved ? "!stillReserved" : "-1")}"); + + Interlocked.Increment(ref RedisState.TotalClientsCreatedOutsidePool); + + //Don't handle callbacks for new client outside pool + newClient.ClientManager = null; + return newClient; + } + + poolIndex++; + clients[inactivePoolIndex] = newClient; + + return (!AssertAccessOnlyOnSameThread || forAsync) + ? 
newClient + : newClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); + } + } + catch + { + //Revert free-slot for any I/O exceptions that can throw (before lock) + lock (clients) + { + if (inactivePoolIndex >= 0 && inactivePoolIndex < clients.Length) + { + clients[inactivePoolIndex] = null; + } + } + throw; + } + } + finally + { + RedisState.DisposeExpiredClients(); + } + } + + public IRedisClient GetReadOnlyClient() + { + return GetClient(false); + } + + class ReservedClient : RedisClient + { + public ReservedClient() + { + this.DeactivatedAt = DateTime.UtcNow; + } + + public override void Dispose() { } + } + + static readonly ReservedClient reservedSlot = new ReservedClient(); + + + /// + /// Called within a lock + /// + /// + private int GetInActiveClient(out RedisClient inactiveClient) + { + //this will loop through all hosts in readClients once even though there are 2 for loops + //both loops are used to try to get the preferred host according to the round robin algorithm + var readWriteTotal = RedisResolver.ReadWriteHostsCount; + var desiredIndex = poolIndex % clients.Length; + for (int x = 0; x < readWriteTotal; x++) + { + var nextHostIndex = (desiredIndex + x) % readWriteTotal; + for (var i = nextHostIndex; i < clients.Length; i += readWriteTotal) + { + if (clients[i] != null && !clients[i].Active && !clients[i].HadExceptions) + { + inactiveClient = clients[i]; + return i; + } + + if (clients[i] == null) + { + clients[i] = reservedSlot; + inactiveClient = null; + return i; + } + + if (clients[i] != reservedSlot && clients[i].HadExceptions) + { + inactiveClient = null; + return i; + } + } + } + inactiveClient = null; + return -1; + } + + private RedisClient InitNewClient(RedisClient client) + { + client.Id = Interlocked.Increment(ref RedisClientCounter); + client.Activate(newClient:true); + client.ClientManager = this; + client.ConnectionFilter = ConnectionFilter; + + return client; + } + + public void DisposeClient(RedisNativeClient client) + { + lock (clients) + { + for (var i = 0; i < clients.Length; i++) + { + var writeClient = clients[i]; + if (client != writeClient) continue; + if (client.IsDisposed) + { + clients[i] = null; + } + else + { + client.TrackThread = null; + client.Deactivate(); + } + + Monitor.PulseAll(clients); + return; + } + } + } + + /// + /// Disposes the write client. + /// + /// The client. 
+ public void DisposeWriteClient(RedisNativeClient client) + { + lock (clients) + { + client.Deactivate(); + } + } + + public Dictionary GetStats() + { + var clientsPoolSize = clients.Length; + var clientsCreated = 0; + var clientsWithExceptions = 0; + var clientsInUse = 0; + var clientsConnected = 0; + + foreach (var client in clients) + { + if (client == null) + { + clientsCreated++; + continue; + } + + if (client.HadExceptions) + clientsWithExceptions++; + if (client.Active) + clientsInUse++; + if (client.IsSocketConnected()) + clientsConnected++; + } + + var ret = new Dictionary + { + {"VersionString", "" + Text.Env.VersionString}, + + {"clientsPoolSize", "" + clientsPoolSize}, + {"clientsCreated", "" + clientsCreated}, + {"clientsWithExceptions", "" + clientsWithExceptions}, + {"clientsInUse", "" + clientsInUse}, + {"clientsConnected", "" + clientsConnected}, + + {"RedisResolver.ReadOnlyHostsCount", "" + RedisResolver.ReadOnlyHostsCount}, + {"RedisResolver.ReadWriteHostsCount", "" + RedisResolver.ReadWriteHostsCount}, + }; + + return ret; + } + + private void AssertValidPool() + { + if (clients.Length < 1) + throw new InvalidOperationException("Need a minimum pool size of 1"); + } + + public int[] GetClientPoolActiveStates() + { + lock (clients) + { + var activeStates = new int[clients.Length]; + for (int i = 0; i < clients.Length; i++) + { + var client = clients[i]; + activeStates[i] = client == null + ? -1 + : client.Active ? 1 : 0; + } + return activeStates; + } + } + + ~RedisManagerPool() + { + Dispose(false); + } + + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + + protected virtual void Dispose(bool disposing) + { + if (Interlocked.Increment(ref disposeAttempts) > 1) return; + + if (disposing) + { + // get rid of managed resources + } + + try + { + // get rid of unmanaged resources + foreach (var client in clients) + { + Dispose(client); + } + } + catch (Exception ex) + { + Log.Error("Error when trying to dispose of PooledRedisClientManager", ex); + } + + RedisState.DisposeAllDeactivatedClients(); + } + + private int disposeAttempts = 0; + + protected void Dispose(RedisClient redisClient) + { + if (redisClient == null) return; + try + { + redisClient.DisposeConnection(); + } + catch (Exception ex) + { + Log.Error($"Error when trying to dispose of RedisClient to host {redisClient.Host}:{redisClient.Port}", ex); + } + } + + public ICacheClient GetCacheClient() + { + return new RedisClientManagerCacheClient(this); + } + + public ICacheClient GetReadOnlyCacheClient() + { + return new RedisClientManagerCacheClient(this) { ReadOnly = true }; + } + } +} diff --git a/src/ServiceStack.Redis/RedisNativeClient.Async.cs b/src/ServiceStack.Redis/RedisNativeClient.Async.cs new file mode 100644 index 00000000..d265c0ae --- /dev/null +++ b/src/ServiceStack.Redis/RedisNativeClient.Async.cs @@ -0,0 +1,1487 @@ +using ServiceStack.Redis.Internal; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Globalization; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisNativeClient + : IRedisNativeClientAsync + { + internal IRedisPipelineSharedAsync PipelineAsync + => (IRedisPipelineSharedAsync)pipeline; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void AssertNotNull(object obj, string name = "key") + { + if (obj is null) Throw(name); + static 
void Throw(string name) => throw new ArgumentNullException(name); + } + + private IRedisNativeClientAsync AsAsync() => this; + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + ValueTask IRedisNativeClientAsync.TimeAsync(CancellationToken token) + => SendExpectMultiDataAsync(token, Commands.Time); + + ValueTask IRedisNativeClientAsync.ExistsAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Exists, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SetAsync(string key, byte[] value, bool exists, long expirySeconds, long expiryMilliseconds, CancellationToken token) + { + AssertNotNull(key); + value ??= TypeConstants.EmptyByteArray; + + if (value.Length > OneGb) + throw new ArgumentException("value exceeds 1G", nameof(value)); + + var entryExists = exists ? Commands.Xx : Commands.Nx; + byte[][] args; + if (expiryMilliseconds != 0) + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, Commands.Px, expiryMilliseconds.ToUtf8Bytes(), entryExists }; + } + else if (expirySeconds != 0) + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, Commands.Ex, expirySeconds.ToUtf8Bytes(), entryExists }; + } + else + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, entryExists }; + } + + return IsString(SendExpectStringAsync(token, args), OK); + } + ValueTask IRedisNativeClientAsync.SetAsync(string key, byte[] value, long expirySeconds, long expiryMilliseconds, CancellationToken token) + { + AssertNotNull(key); + value ??= TypeConstants.EmptyByteArray; + + if (value.Length > OneGb) + throw new ArgumentException("value exceeds 1G", nameof(value)); + + byte[][] args; + if (expiryMilliseconds != 0) + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, Commands.Px, expiryMilliseconds.ToUtf8Bytes() }; + } + else if (expirySeconds != 0) + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, Commands.Ex, expirySeconds.ToUtf8Bytes() }; + } + else + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value }; + } + + return SendExpectSuccessAsync(token, args); + } + + ValueTask IRedisNativeClientAsync.GetAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDataAsync(token, Commands.Get, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.DelAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Del, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ScanAsync(ulong cursor, int count, string match, CancellationToken token) + { + if (match == null) + return SendExpectScanResultAsync(token, Commands.Scan, cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + + return SendExpectScanResultAsync(token, Commands.Scan, cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.TypeAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectCodeAsync(token, Commands.Type, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RPushAsync(string listId, byte[] value, CancellationToken token) + { + AssertListIdAndValue(listId, value); + + return SendExpectLongAsync(token, Commands.RPush, listId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SAddAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + + return SendExpectLongAsync(token, Commands.SAdd, setId.ToUtf8Bytes(), value); + } 
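+
+        // Usage sketch (illustrative only; host/port, key and member values below are assumptions,
+        // and the CancellationToken arguments are omitted for brevity). It shows the common shape of
+        // the IRedisNativeClientAsync members in this file: assert arguments synchronously, then
+        // delegate to a SendExpect*Async primitive (SendExpectLongAsync, SendExpectDataAsync,
+        // SendExpectSuccessAsync, ...) that writes the RESP command and returns the parsed reply
+        // as a ValueTask.
+        //
+        //   IRedisNativeClientAsync native = new RedisClient("localhost", 6379);
+        //   var added   = await native.SAddAsync("myset", "member".ToUtf8Bytes());
+        //   var members = await native.SMembersAsync("myset");
+        //   await ((IAsyncDisposable)native).DisposeAsync();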
+ + ValueTask IRedisNativeClientAsync.ZAddAsync(string setId, double score, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + + return SendExpectLongAsync(token, Commands.ZAdd, setId.ToUtf8Bytes(), score.ToFastUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZAddAsync(string setId, long score, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + + return SendExpectLongAsync(token, Commands.ZAdd, setId.ToUtf8Bytes(), score.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.HSetAsync(string hashId, byte[] key, byte[] value, CancellationToken token) + => HSetAsync(hashId.ToUtf8Bytes(), key, value, token); + + internal ValueTask HSetAsync(byte[] hashId, byte[] key, byte[] value, CancellationToken token = default) + { + AssertHashIdAndKey(hashId, key); + + return SendExpectLongAsync(token, Commands.HSet, hashId, key, value); + } + + ValueTask IRedisNativeClientAsync.RandomKeyAsync(CancellationToken token) + => SendExpectDataAsync(token, Commands.RandomKey).FromUtf8BytesAsync(); + + ValueTask IRedisNativeClientAsync.RenameAsync(string oldKeyName, string newKeyName, CancellationToken token) + { + CheckRenameKeys(oldKeyName, newKeyName); + return SendExpectSuccessAsync(token, Commands.Rename, oldKeyName.ToUtf8Bytes(), newKeyName.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RenameNxAsync(string oldKeyName, string newKeyName, CancellationToken token) + { + CheckRenameKeys(oldKeyName, newKeyName); + return SendExpectLongAsync(token, Commands.RenameNx, oldKeyName.ToUtf8Bytes(), newKeyName.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.MSetAsync(byte[][] keys, byte[][] values, CancellationToken token) + { + var keysAndValues = MergeCommandWithKeysAndValues(Commands.MSet, keys, values); + + return SendExpectSuccessAsync(token, keysAndValues); + } + + + ValueTask IRedisNativeClientAsync.MSetAsync(string[] keys, byte[][] values, CancellationToken token) + => ((IRedisNativeClientAsync)this).MSetAsync(keys.ToMultiByteArray(), values, token); + + ValueTask IRedisNativeClientAsync.SelectAsync(long db, CancellationToken token) + { + this.db = db; + return SendExpectSuccessAsync(token, Commands.Select, db.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.DelAsync(string[] keys, CancellationToken token) + { + AssertNotNull(keys, nameof(keys)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.Del, keys); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.ExpireAsync(string key, int seconds, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Expire, key.ToUtf8Bytes(), seconds.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.PExpireAsync(string key, long ttlMs, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.PExpire, key.ToUtf8Bytes(), ttlMs.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.ExpireAtAsync(string key, long unixTime, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.ExpireAt, key.ToUtf8Bytes(), unixTime.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.PExpireAtAsync(string key, long unixTimeMs, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.PExpireAt, key.ToUtf8Bytes(), unixTimeMs.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask 
IRedisNativeClientAsync.TtlAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Ttl, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.PTtlAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.PTtl, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.PingAsync(CancellationToken token) + => IsString(SendExpectCodeAsync(token, Commands.Ping), "PONG"); + + private static ValueTask IsString(ValueTask pending, string expected) + { + return pending.IsCompletedSuccessfully ? (pending.Result == expected).AsValueTaskResult() + : Awaited(pending, expected); + + static async ValueTask Awaited(ValueTask pending, string expected) + => await pending.ConfigureAwait(false) == expected; + } + + ValueTask IRedisNativeClientAsync.EchoAsync(string text, CancellationToken token) + => SendExpectDataAsync(token, Commands.Echo, text.ToUtf8Bytes()).FromUtf8BytesAsync(); + + ValueTask IRedisNativeClientAsync.DbSizeAsync(CancellationToken token) + => SendExpectLongAsync(token, Commands.DbSize); + + ValueTask IRedisNativeClientAsync.LastSaveAsync(CancellationToken token) + => SendExpectLongAsync(token, Commands.LastSave).Await(t => t.FromUnixTime()); + + ValueTask IRedisNativeClientAsync.SaveAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Save); + + ValueTask IRedisNativeClientAsync.BgSaveAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.BgSave); + + ValueTask IRedisNativeClientAsync.ShutdownAsync(bool noSave, CancellationToken token) + => noSave + ? SendWithoutReadAsync(token, Commands.Shutdown, Commands.NoSave) + : SendWithoutReadAsync(token, Commands.Shutdown); + + ValueTask IRedisNativeClientAsync.BgRewriteAofAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.BgRewriteAof); + + ValueTask IRedisNativeClientAsync.QuitAsync(CancellationToken token) + => SendWithoutReadAsync(token, Commands.Quit); + + ValueTask IRedisNativeClientAsync.FlushDbAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.FlushDb); + + ValueTask IRedisNativeClientAsync.FlushAllAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.FlushAll); + + ValueTask IRedisNativeClientAsync.SlaveOfAsync(string hostname, int port, CancellationToken token) + => SendExpectSuccessAsync(token, Commands.SlaveOf, hostname.ToUtf8Bytes(), port.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.SlaveOfNoOneAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.SlaveOf, Commands.No, Commands.One); + + ValueTask IRedisNativeClientAsync.KeysAsync(string pattern, CancellationToken token) + { + AssertNotNull(pattern, nameof(pattern)); + return SendExpectMultiDataAsync(token, Commands.Keys, pattern.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.MGetAsync(string[] keys, CancellationToken token) + { + AssertNotNull(keys, nameof(keys)); + if (keys.Length == 0) + throw new ArgumentException("keys"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.MGet, keys); + + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SetExAsync(string key, int expireInSeconds, byte[] value, CancellationToken token) + { + AssertNotNull(key); + value ??= TypeConstants.EmptyByteArray; + + if (value.Length > OneGb) + throw new ArgumentException("value exceeds 1G", nameof(value)); + + return SendExpectSuccessAsync(token, Commands.SetEx, key.ToUtf8Bytes(), 
expireInSeconds.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.WatchAsync(string[] keys, CancellationToken token) + { + AssertNotNull(keys, nameof(keys)); + if (keys.Length == 0) + throw new ArgumentException("keys"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.Watch, keys); + + return SendExpectCodeAsync(token, cmdWithArgs).Await(); + } + + ValueTask IRedisNativeClientAsync.UnWatchAsync(CancellationToken token) + => SendExpectCodeAsync(token, Commands.UnWatch).Await(); + + ValueTask IRedisNativeClientAsync.AppendAsync(string key, byte[] value, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Append, key.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.GetRangeAsync(string key, int fromIndex, int toIndex, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDataAsync(token, Commands.GetRange, key.ToUtf8Bytes(), fromIndex.ToUtf8Bytes(), toIndex.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SetRangeAsync(string key, int offset, byte[] value, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.SetRange, key.ToUtf8Bytes(), offset.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.GetBitAsync(string key, int offset, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.GetBit, key.ToUtf8Bytes(), offset.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SetBitAsync(string key, int offset, int value, CancellationToken token) + { + AssertNotNull(key); + if (value > 1 || value < 0) + throw new ArgumentOutOfRangeException(nameof(value), "value is out of range"); + return SendExpectLongAsync(token, Commands.SetBit, key.ToUtf8Bytes(), offset.ToUtf8Bytes(), value.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.PersistAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Persist, key.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.PSetExAsync(string key, long expireInMs, byte[] value, CancellationToken token) + { + AssertNotNull(key); + return SendExpectSuccessAsync(token, Commands.PSetEx, key.ToUtf8Bytes(), expireInMs.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SetNXAsync(string key, byte[] value, CancellationToken token) + { + AssertNotNull(key); + value ??= TypeConstants.EmptyByteArray; + + if (value.Length > OneGb) + throw new ArgumentException("value exceeds 1G", "value"); + + return SendExpectLongAsync(token, Commands.SetNx, key.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SPopAsync(string setId, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectDataAsync(token, Commands.SPop, setId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SPopAsync(string setId, int count, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectMultiDataAsync(token, Commands.SPop, setId.ToUtf8Bytes(), count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SlowlogResetAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Slowlog, "RESET".ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.SlowlogGetAsync(int? 
top, CancellationToken token) + { + if (top.HasValue) + return SendExpectDeeplyNestedMultiDataAsync(token, Commands.Slowlog, Commands.Get, top.Value.ToUtf8Bytes()); + else + return SendExpectDeeplyNestedMultiDataAsync(token, Commands.Slowlog, Commands.Get); + } + + ValueTask IRedisNativeClientAsync.ZCardAsync(string setId, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZCard, setId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZCountAsync(string setId, double min, double max, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZCount, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZScoreAsync(string setId, byte[] value, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectDoubleAsync(token, Commands.ZScore, setId.ToUtf8Bytes(), value); + } + + protected ValueTask RawCommandAsync(CancellationToken token, params object[] cmdWithArgs) + { + var byteArgs = new List(); + + foreach (var arg in cmdWithArgs) + { + if (arg == null) + { + byteArgs.Add(TypeConstants.EmptyByteArray); + continue; + } + + if (arg is byte[] bytes) + { + byteArgs.Add(bytes); + } + else if (arg.GetType().IsUserType()) + { + var json = arg.ToJson(); + byteArgs.Add(json.ToUtf8Bytes()); + } + else + { + var str = arg.ToString(); + byteArgs.Add(str.ToUtf8Bytes()); + } + } + + return SendExpectComplexResponseAsync(token, byteArgs.ToArray()); + } + + ValueTask> IRedisNativeClientAsync.InfoAsync(CancellationToken token) + => SendExpectStringAsync(token, Commands.Info).Await(ParseInfoResult); + + ValueTask IRedisNativeClientAsync.ZRangeByLexAsync(string setId, string min, string max, int? skip, int? 
take, CancellationToken token) + => SendExpectMultiDataAsync(token, GetZRangeByLexArgs(setId, min, max, skip, take)); + + ValueTask IRedisNativeClientAsync.ZLexCountAsync(string setId, string min, string max, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + + return SendExpectLongAsync(token, + Commands.ZLexCount, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZRemRangeByLexAsync(string setId, string min, string max, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + + return SendExpectLongAsync(token, + Commands.ZRemRangeByLex, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.CalculateSha1Async(string luaBody, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + byte[] buffer = Encoding.UTF8.GetBytes(luaBody); + return BitConverter.ToString(buffer.ToSha1Hash()).Replace("-", "").AsValueTaskResult(); + } + + ValueTask IRedisNativeClientAsync.ScriptExistsAsync(byte[][] sha1Refs, CancellationToken token) + { + var keysAndValues = MergeCommandWithArgs(Commands.Script, Commands.Exists, sha1Refs); + return SendExpectMultiDataAsync(token, keysAndValues); + } + + ValueTask IRedisNativeClientAsync.ScriptFlushAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Script, Commands.Flush); + + ValueTask IRedisNativeClientAsync.ScriptKillAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Script, Commands.Kill); + + ValueTask IRedisNativeClientAsync.ScriptLoadAsync(string body, CancellationToken token) + { + AssertNotNull(body, nameof(body)); + + var cmdArgs = MergeCommandWithArgs(Commands.Script, Commands.Load, body.ToUtf8Bytes()); + return SendExpectDataAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.StrLenAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.StrLen, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.LLenAsync(string listId, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectLongAsync(token, Commands.LLen, listId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SCardAsync(string setId, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.SCard, setId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HLenAsync(string hashId, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + return SendExpectLongAsync(token, Commands.HLen, hashId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.EvalCommandAsync(string luaBody, int numberKeysInArgs, byte[][] keys, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); + return RawCommandAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalShaCommandAsync(string sha1, int numberKeysInArgs, byte[][] keys, CancellationToken token) + { + AssertNotNull(sha1, nameof(sha1)); + + var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); + return RawCommandAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalAsync(string luaBody, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + var cmdArgs = MergeCommandWithArgs(Commands.Eval, 
luaBody.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectMultiDataAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalShaAsync(string sha1, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(sha1, nameof(sha1)); + + var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectMultiDataAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalIntAsync(string luaBody, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectLongAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalShaIntAsync(string sha1, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(sha1, nameof(sha1)); + + var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectLongAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalStrAsync(string luaBody, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectDataAsync(token, cmdArgs).FromUtf8BytesAsync(); + } + + ValueTask IRedisNativeClientAsync.EvalShaStrAsync(string sha1, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(sha1, nameof(sha1)); + + var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectDataAsync(token, cmdArgs).FromUtf8BytesAsync(); + } + + ValueTask IRedisNativeClientAsync.SMembersAsync(string setId, CancellationToken token) + => SendExpectMultiDataAsync(token, Commands.SMembers, setId.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.SAddAsync(string setId, byte[][] values, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + AssertNotNull(values, nameof(values)); + if (values.Length == 0) + throw new ArgumentException(nameof(values)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SAdd, setId.ToUtf8Bytes(), values); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SRemAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectLongAsync(token, Commands.SRem, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.IncrByAsync(string key, long count, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.IncrBy, key.ToUtf8Bytes(), count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.IncrByFloatAsync(string key, double incrBy, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDoubleAsync(token, Commands.IncrByFloat, key.ToUtf8Bytes(), incrBy.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.IncrAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Incr, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.DecrAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Decr, key.ToUtf8Bytes()); + } + + ValueTask 
IRedisNativeClientAsync.DecrByAsync(string key, long count, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.DecrBy, key.ToUtf8Bytes(), count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ConfigGetAsync(string pattern, CancellationToken token) + => SendExpectMultiDataAsync(token, Commands.Config, Commands.Get, pattern.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.ConfigSetAsync(string item, byte[] value, CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Config, Commands.Set, item.ToUtf8Bytes(), value); + + ValueTask IRedisNativeClientAsync.ConfigResetStatAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Config, Commands.ResetStat); + + ValueTask IRedisNativeClientAsync.ConfigRewriteAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Config, Commands.Rewrite); + + ValueTask IRedisNativeClientAsync.DebugSegfaultAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Debug, Commands.Segfault); + + ValueTask IRedisNativeClientAsync.DumpAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDataAsync(token, Commands.Dump, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RestoreAsync(string key, long expireMs, byte[] dumpValue, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDataAsync(token, Commands.Restore, key.ToUtf8Bytes(), expireMs.ToUtf8Bytes(), dumpValue); + } + + ValueTask IRedisNativeClientAsync.MigrateAsync(string host, int port, string key, int destinationDb, long timeoutMs, CancellationToken token) + { + AssertNotNull(key); + return SendExpectSuccessAsync(token, Commands.Migrate, host.ToUtf8Bytes(), port.ToUtf8Bytes(), key.ToUtf8Bytes(), destinationDb.ToUtf8Bytes(), timeoutMs.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.MoveAsync(string key, int db, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Move, key.ToUtf8Bytes(), db.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.ObjectIdleTimeAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Object, Commands.IdleTime, key.ToUtf8Bytes()); + } + + async ValueTask IRedisNativeClientAsync.RoleAsync(CancellationToken token) + => (await SendExpectComplexResponseAsync(token, Commands.Role).ConfigureAwait(false)).ToRedisText(); + + ValueTask IRedisNativeClientAsync.RawCommandAsync(object[] cmdWithArgs, CancellationToken token) + => SendExpectComplexResponseAsync(token, PrepareRawCommand(cmdWithArgs)); + + ValueTask IRedisNativeClientAsync.RawCommandAsync(byte[][] cmdWithBinaryArgs, CancellationToken token) + => SendExpectComplexResponseAsync(token, cmdWithBinaryArgs); + + ValueTask IRedisNativeClientAsync.ClientGetNameAsync(CancellationToken token) + => SendExpectStringAsync(token, Commands.Client, Commands.GetName); + + ValueTask IRedisNativeClientAsync.ClientSetNameAsync(string name, CancellationToken token) + { + ClientValidateName(name); + return SendExpectSuccessAsync(token, Commands.Client, Commands.SetName, name.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ClientKillAsync(string clientAddr, CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Client, Commands.Kill, clientAddr.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.ClientKillAsync(string addr, string id, string type, string skipMe, CancellationToken token) + => 
SendExpectLongAsync(token, ClientKillPrepareArgs(addr, id, type, skipMe)); + + ValueTask IRedisNativeClientAsync.ClientListAsync(CancellationToken token) + => SendExpectDataAsync(token, Commands.Client, Commands.List); + + ValueTask IRedisNativeClientAsync.ClientPauseAsync(int timeOutMs, CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Client, Commands.Pause, timeOutMs.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.MSetNxAsync(byte[][] keys, byte[][] values, CancellationToken token) + { + var keysAndValues = MergeCommandWithKeysAndValues(Commands.MSet, keys, values); + return SendExpectLongAsync(token, keysAndValues).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.MSetNxAsync(string[] keys, byte[][] values, CancellationToken token) + => AsAsync().MSetNxAsync(keys.ToMultiByteArray(), values, token); + + ValueTask IRedisNativeClientAsync.GetSetAsync(string key, byte[] value, CancellationToken token) + { + GetSetAssertArgs(key, ref value); + return SendExpectDataAsync(token, Commands.GetSet, key.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.MGetAsync(byte[][] keys, CancellationToken token) + => SendExpectMultiDataAsync(token, MGetPrepareArgs(keys)); + + ValueTask IRedisNativeClientAsync.SScanAsync(string setId, ulong cursor, int count, string match, CancellationToken token) + { + if (match == null) + { + return SendExpectScanResultAsync(token, Commands.SScan, + setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + return SendExpectScanResultAsync(token, Commands.SScan, + setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZScanAsync(string setId, ulong cursor, int count, string match, CancellationToken token) + { + if (match == null) + { + return SendExpectScanResultAsync(token, Commands.ZScan, + setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + return SendExpectScanResultAsync(token, Commands.ZScan, + setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HScanAsync(string hashId, ulong cursor, int count, string match, CancellationToken token) + { + if (match == null) + { + return SendExpectScanResultAsync(token, Commands.HScan, + hashId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + return SendExpectScanResultAsync(token, Commands.HScan, + hashId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.PfAddAsync(string key, byte[][] elements, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.PfAdd, key.ToUtf8Bytes(), elements); + return SendExpectLongAsync(token, cmdWithArgs).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.PfCountAsync(string key, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.PfCount, key.ToUtf8Bytes()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.PfMergeAsync(string toKeyId, string[] fromKeys, CancellationToken token) + { + var fromKeyBytes = fromKeys.Map(x => x.ToUtf8Bytes()).ToArray(); + var cmdWithArgs = MergeCommandWithArgs(Commands.PfMerge, toKeyId.ToUtf8Bytes(), fromKeyBytes); + return SendExpectSuccessAsync(token, cmdWithArgs); + } + + ValueTask 
IRedisNativeClientAsync.SortAsync(string listOrSetId, SortOptions sortOptions, CancellationToken token) + => SendExpectMultiDataAsync(token, SortPrepareArgs(listOrSetId, sortOptions)); + + ValueTask IRedisNativeClientAsync.LRangeAsync(string listId, int startingFrom, int endingAt, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectMultiDataAsync(token, Commands.LRange, listId.ToUtf8Bytes(), startingFrom.ToUtf8Bytes(), endingAt.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RPushXAsync(string listId, byte[] value, CancellationToken token) + { + AssertListIdAndValue(listId, value); + return SendExpectLongAsync(token, Commands.RPush, listId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LPushAsync(string listId, byte[] value, CancellationToken token) + { + AssertListIdAndValue(listId, value); + return SendExpectLongAsync(token, Commands.LPush, listId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LPushXAsync(string listId, byte[] value, CancellationToken token) + { + AssertListIdAndValue(listId, value); + return SendExpectLongAsync(token, Commands.LPushX, listId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LTrimAsync(string listId, int keepStartingFrom, int keepEndingAt, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectSuccessAsync(token, Commands.LTrim, listId.ToUtf8Bytes(), keepStartingFrom.ToUtf8Bytes(), keepEndingAt.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.LRemAsync(string listId, int removeNoOfMatches, byte[] value, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectLongAsync(token, Commands.LRem, listId.ToUtf8Bytes(), removeNoOfMatches.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LIndexAsync(string listId, int listIndex, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectDataAsync(token, Commands.LIndex, listId.ToUtf8Bytes(), listIndex.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.LInsertAsync(string listId, bool insertBefore, byte[] pivot, byte[] value, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + var position = insertBefore ? 
Commands.Before : Commands.After; + return SendExpectSuccessAsync(token, Commands.LInsert, listId.ToUtf8Bytes(), position, pivot, value); + } + + ValueTask IRedisNativeClientAsync.LSetAsync(string listId, int listIndex, byte[] value, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectSuccessAsync(token, Commands.LSet, listId.ToUtf8Bytes(), listIndex.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LPopAsync(string listId, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectDataAsync(token, Commands.LPop, listId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RPopAsync(string listId, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectDataAsync(token, Commands.RPop, listId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.BLPopAsync(string listId, int timeOutSecs, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectMultiDataAsync(token, Commands.BLPop, listId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.BLPopAsync(string[] listIds, int timeOutSecs, CancellationToken token) + { + AssertNotNull(listIds, nameof(listIds)); + var args = new List { Commands.BLPop }; + args.AddRange(listIds.Select(listId => listId.ToUtf8Bytes())); + args.Add(timeOutSecs.ToUtf8Bytes()); + return SendExpectMultiDataAsync(token, args.ToArray()); + } + + async ValueTask IRedisNativeClientAsync.BLPopValueAsync(string listId, int timeOutSecs, CancellationToken token) + { + var blockingResponse = await AsAsync().BLPopAsync(new[] { listId }, timeOutSecs, token).ConfigureAwait(false); + return blockingResponse.Length == 0 + ? null + : blockingResponse[1]; + } + + async ValueTask IRedisNativeClientAsync.BLPopValueAsync(string[] listIds, int timeOutSecs, CancellationToken token) + { + var blockingResponse = await AsAsync().BLPopAsync(listIds, timeOutSecs, token).ConfigureAwait(false); + return blockingResponse.Length == 0 + ? null + : blockingResponse; + } + + ValueTask IRedisNativeClientAsync.BRPopAsync(string listId, int timeOutSecs, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectMultiDataAsync(token, Commands.BRPop, listId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.BRPopAsync(string[] listIds, int timeOutSecs, CancellationToken token) + { + AssertNotNull(listIds, nameof(listIds)); + var args = new List { Commands.BRPop }; + args.AddRange(listIds.Select(listId => listId.ToUtf8Bytes())); + args.Add(timeOutSecs.ToUtf8Bytes()); + return SendExpectMultiDataAsync(token, args.ToArray()); + } + + ValueTask IRedisNativeClientAsync.RPopLPushAsync(string fromListId, string toListId, CancellationToken token) + { + AssertNotNull(fromListId, nameof(fromListId)); + AssertNotNull(toListId, nameof(toListId)); + return SendExpectDataAsync(token, Commands.RPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes()); + } + + async ValueTask IRedisNativeClientAsync.BRPopValueAsync(string listId, int timeOutSecs, CancellationToken token) + { + var blockingResponse = await AsAsync().BRPopAsync(new[] { listId }, timeOutSecs, token).ConfigureAwait(false); + return blockingResponse.Length == 0 + ? 
null + : blockingResponse[1]; + } + + async ValueTask IRedisNativeClientAsync.BRPopValueAsync(string[] listIds, int timeOutSecs, CancellationToken token) + { + var blockingResponse = await AsAsync().BRPopAsync(listIds, timeOutSecs, token).ConfigureAwait(false); + return blockingResponse.Length == 0 + ? null + : blockingResponse; + } + + async ValueTask IRedisNativeClientAsync.BRPopLPushAsync(string fromListId, string toListId, int timeOutSecs, CancellationToken token) + { + AssertNotNull(fromListId, nameof(fromListId)); + AssertNotNull(toListId, nameof(toListId)); + byte[][] result = await SendExpectMultiDataAsync(token, Commands.BRPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); + return result.Length == 0 ? null : result[1]; + } + + ValueTask IRedisNativeClientAsync.SMoveAsync(string fromSetId, string toSetId, byte[] value, CancellationToken token) + { + AssertNotNull(fromSetId, nameof(fromSetId)); + AssertNotNull(toSetId, nameof(toSetId)); + return SendExpectSuccessAsync(token, Commands.SMove, fromSetId.ToUtf8Bytes(), toSetId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SIsMemberAsync(string setId, byte[] value, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.SIsMember, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SInterAsync(string[] setIds, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.SInter, setIds); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SInterStoreAsync(string intoSetId, string[] setIds, CancellationToken token) + { + var setIdsList = new List(setIds); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SInterStore, setIdsList.ToArray()); + return SendExpectSuccessAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SUnionAsync(string[] setIds, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.SUnion, setIds); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SUnionStoreAsync(string intoSetId, string[] setIds, CancellationToken token) + { + var setIdsList = new List(setIds); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SUnionStore, setIdsList.ToArray()); + return SendExpectSuccessAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SDiffAsync(string fromSetId, string[] withSetIds, CancellationToken token) + { + var setIdsList = new List(withSetIds); + setIdsList.Insert(0, fromSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SDiff, setIdsList.ToArray()); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SDiffStoreAsync(string intoSetId, string fromSetId, string[] withSetIds, CancellationToken token) + { + var setIdsList = new List(withSetIds); + setIdsList.Insert(0, fromSetId); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SDiffStore, setIdsList.ToArray()); + return SendExpectSuccessAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SRandMemberAsync(string setId, CancellationToken token) + => SendExpectDataAsync(token, Commands.SRandMember, setId.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.ZRemAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectLongAsync(token, 
Commands.ZRem, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZRemAsync(string setId, byte[][] values, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + AssertNotNull(values, nameof(values)); + if (values.Length == 0) + throw new ArgumentException("values"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZRem, setId.ToUtf8Bytes(), values); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.ZIncrByAsync(string setId, double incrBy, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectDoubleAsync(token, Commands.ZIncrBy, setId.ToUtf8Bytes(), incrBy.ToFastUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZIncrByAsync(string setId, long incrBy, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectDoubleAsync(token, Commands.ZIncrBy, setId.ToUtf8Bytes(), incrBy.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZRankAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectLongAsync(token, Commands.ZRank, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZRevRankAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectLongAsync(token, Commands.ZRevRank, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZRangeAsync(string setId, int min, int max, CancellationToken token) + => SendExpectMultiDataAsync(token, Commands.ZRange, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + + private ValueTask GetRangeAsync(byte[] commandBytes, string setId, int min, int max, bool withScores, CancellationToken token) + { + var args = GetRangeArgs(commandBytes, setId, min, max, withScores); + return SendExpectMultiDataAsync(token, args); + } + + ValueTask IRedisNativeClientAsync.ZRangeWithScoresAsync(string setId, int min, int max, CancellationToken token) + => GetRangeAsync(Commands.ZRange, setId, min, max, true, token); + + ValueTask IRedisNativeClientAsync.ZRevRangeAsync(string setId, int min, int max, CancellationToken token) + => GetRangeAsync(Commands.ZRevRange, setId, min, max, false, token); + + ValueTask IRedisNativeClientAsync.ZRevRangeWithScoresAsync(string setId, int min, int max, CancellationToken token) + => GetRangeAsync(Commands.ZRevRange, setId, min, max, true, token); + + private ValueTask GetRangeByScoreAsync(byte[] commandBytes, + string setId, double min, double max, int? skip, int? take, bool withScores, CancellationToken token) + { + var args = GetRangeByScoreArgs(commandBytes, setId, min, max, skip, take, withScores); + return SendExpectMultiDataAsync(token, args); + } + + ValueTask IRedisNativeClientAsync.ZRangeByScoreAsync(string setId, double min, double max, int? skip, int? take, CancellationToken token) + => GetRangeByScoreAsync(Commands.ZRangeByScore, setId, min, max, skip, take, false, token); + + ValueTask IRedisNativeClientAsync.ZRangeByScoreAsync(string setId, long min, long max, int? skip, int? take, CancellationToken token) + => GetRangeByScoreAsync(Commands.ZRangeByScore, setId, min, max, skip, take, false, token); + + ValueTask IRedisNativeClientAsync.ZRangeByScoreWithScoresAsync(string setId, double min, double max, int? skip, int? 
take, CancellationToken token) + => GetRangeByScoreAsync(Commands.ZRangeByScore, setId, min, max, skip, take, true, token); + + ValueTask IRedisNativeClientAsync.ZRangeByScoreWithScoresAsync(string setId, long min, long max, int? skip, int? take, CancellationToken token) + => GetRangeByScoreAsync(Commands.ZRangeByScore, setId, min, max, skip, take, true, token); + + ValueTask IRedisNativeClientAsync.ZRevRangeByScoreAsync(string setId, double min, double max, int? skip, int? take, CancellationToken token) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong order + return GetRangeByScoreAsync(Commands.ZRevRangeByScore, setId, max, min, skip, take, false, token); + } + + ValueTask IRedisNativeClientAsync.ZRevRangeByScoreAsync(string setId, long min, long max, int? skip, int? take, CancellationToken token) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong order + return GetRangeByScoreAsync(Commands.ZRevRangeByScore, setId, max, min, skip, take, false, token); + } + + ValueTask IRedisNativeClientAsync.ZRevRangeByScoreWithScoresAsync(string setId, double min, double max, int? skip, int? take, CancellationToken token) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong order + return GetRangeByScoreAsync(Commands.ZRevRangeByScore, setId, max, min, skip, take, true, token); + } + + ValueTask IRedisNativeClientAsync.ZRevRangeByScoreWithScoresAsync(string setId, long min, long max, int? skip, int? take, CancellationToken token) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong order + return GetRangeByScoreAsync(Commands.ZRevRangeByScore, setId, max, min, skip, take, true, token); + } + + ValueTask IRedisNativeClientAsync.ZRemRangeByRankAsync(string setId, int min, int max, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZRemRangeByRank, setId.ToUtf8Bytes(), + min.ToUtf8Bytes(), max.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZRemRangeByScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZRemRangeByScore, setId.ToUtf8Bytes(), + fromScore.ToFastUtf8Bytes(), toScore.ToFastUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZRemRangeByScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZRemRangeByScore, setId.ToUtf8Bytes(), + fromScore.ToUtf8Bytes(), toScore.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZUnionStoreAsync(string intoSetId, string[] setIds, CancellationToken token) + { + var setIdsList = new List(setIds); + setIdsList.Insert(0, setIds.Length.ToString()); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZUnionStore, setIdsList.ToArray()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.ZInterStoreAsync(string intoSetId, string[] setIds, CancellationToken token) + { + var setIdsList = new List(setIds); + setIdsList.Insert(0, setIds.Length.ToString()); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZInterStore, setIdsList.ToArray()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + internal ValueTask ZInterStoreAsync(string intoSetId, string[] setIds, string[] args, CancellationToken token) + { + var totalArgs = new
List(setIds); + totalArgs.Insert(0, setIds.Length.ToString()); + totalArgs.Insert(0, intoSetId); + totalArgs.AddRange(args); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZInterStore, totalArgs.ToArray()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + internal ValueTask ZUnionStoreAsync(string intoSetId, string[] setIds, string[] args, CancellationToken token) + { + var totalArgs = new List(setIds); + totalArgs.Insert(0, setIds.Length.ToString()); + totalArgs.Insert(0, intoSetId); + totalArgs.AddRange(args); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZUnionStore, totalArgs.ToArray()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.HMSetAsync(string hashId, byte[][] keys, byte[][] values, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + var cmdArgs = MergeCommandWithKeysAndValues(Commands.HMSet, hashId.ToUtf8Bytes(), keys, values); + return SendExpectSuccessAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.HSetNXAsync(string hashId, byte[] key, byte[] value, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectLongAsync(token, Commands.HSetNx, hashId.ToUtf8Bytes(), key, value); + } + + ValueTask IRedisNativeClientAsync.HIncrbyAsync(string hashId, byte[] key, int incrementBy, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectLongAsync(token, Commands.HIncrBy, hashId.ToUtf8Bytes(), key, incrementBy.ToString().ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HIncrbyFloatAsync(string hashId, byte[] key, double incrementBy, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectDoubleAsync(token, Commands.HIncrByFloat, hashId.ToUtf8Bytes(), key, incrementBy.ToString(CultureInfo.InvariantCulture).ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HGetAsync(string hashId, byte[] key, CancellationToken token) + => HGetAsync(hashId.ToUtf8Bytes(), key, token); + + private ValueTask HGetAsync(byte[] hashId, byte[] key, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectDataAsync(token, Commands.HGet, hashId, key); + } + + ValueTask IRedisNativeClientAsync.HMGetAsync(string hashId, byte[][] keys, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + if (keys.Length == 0) + throw new ArgumentNullException(nameof(keys)); + + var cmdArgs = MergeCommandWithArgs(Commands.HMGet, hashId.ToUtf8Bytes(), keys); + return SendExpectMultiDataAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.HDelAsync(string hashId, byte[] key, CancellationToken token) + => HDelAsync(hashId.ToUtf8Bytes(), key, token); + + private ValueTask HDelAsync(byte[] hashId, byte[] key, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectLongAsync(token, Commands.HDel, hashId, key); + } + + ValueTask IRedisNativeClientAsync.HExistsAsync(string hashId, byte[] key, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectLongAsync(token, Commands.HExists, hashId.ToUtf8Bytes(), key); + } + + ValueTask IRedisNativeClientAsync.HKeysAsync(string hashId, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + return SendExpectMultiDataAsync(token, Commands.HKeys, hashId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HValsAsync(string hashId, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + return SendExpectMultiDataAsync(token, Commands.HVals, hashId.ToUtf8Bytes()); + 
} + + ValueTask IRedisNativeClientAsync.HGetAllAsync(string hashId, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + return SendExpectMultiDataAsync(token, Commands.HGetAll, hashId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.GeoAddAsync(string key, double longitude, double latitude, string member, CancellationToken token) + { + AssertNotNull(key, nameof(key)); + AssertNotNull(member, nameof(member)); + return SendExpectLongAsync(token, Commands.GeoAdd, key.ToUtf8Bytes(), longitude.ToUtf8Bytes(), latitude.ToUtf8Bytes(), member.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.GeoAddAsync(string key, RedisGeo[] geoPoints, CancellationToken token) + { + var cmdWithArgs = GeoAddPrepareArgs(key, geoPoints); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.GeoDistAsync(string key, string fromMember, string toMember, string unit, CancellationToken token) + { + AssertNotNull(key, nameof(key)); + + return unit == null + ? SendExpectDoubleAsync(token, Commands.GeoDist, key.ToUtf8Bytes(), fromMember.ToUtf8Bytes(), toMember.ToUtf8Bytes()) + : SendExpectDoubleAsync(token, Commands.GeoDist, key.ToUtf8Bytes(), fromMember.ToUtf8Bytes(), toMember.ToUtf8Bytes(), unit.ToUtf8Bytes()); + } + + async ValueTask IRedisNativeClientAsync.GeoHashAsync(string key, string[] members, CancellationToken token) + { + AssertNotNull(key, nameof(key)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.GeoHash, key.ToUtf8Bytes(), members.Map(x => x.ToUtf8Bytes()).ToArray()); + var result = await SendExpectMultiDataAsync(token, cmdWithArgs).ConfigureAwait(false); + return result.ToStringArray(); + } + + async ValueTask> IRedisNativeClientAsync.GeoPosAsync(string key, string[] members, CancellationToken token) + { + AssertNotNull(key, nameof(key)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.GeoPos, key.ToUtf8Bytes(), members.Map(x => x.ToUtf8Bytes()).ToArray()); + var data = await SendExpectComplexResponseAsync(token, cmdWithArgs).ConfigureAwait(false); + return GeoPosParseResult(members, data); + } + + async ValueTask> IRedisNativeClientAsync.GeoRadiusAsync(string key, double longitude, double latitude, double radius, string unit, bool withCoords, bool withDist, bool withHash, int? count, bool? asc, CancellationToken token) + { + var cmdWithArgs = GeoRadiusPrepareArgs(key, longitude, latitude, radius, unit, + withCoords, withDist, withHash, count, asc); + + var to = new List(); + + if (!(withCoords || withDist || withHash)) + { + var members = (await SendExpectMultiDataAsync(token, cmdWithArgs).ConfigureAwait(false)).ToStringArray(); + foreach (var member in members) + { + to.Add(new RedisGeoResult { Member = member }); + } + } + else + { + var data = await SendExpectComplexResponseAsync(token, cmdWithArgs).ConfigureAwait(false); + GetRadiusParseResult(unit, withCoords, withDist, withHash, to, data); + } + + return to; + } + + async ValueTask> IRedisNativeClientAsync.GeoRadiusByMemberAsync(string key, string member, double radius, string unit, bool withCoords, bool withDist, bool withHash, int? count, bool? 
asc, CancellationToken token) + { + var cmdWithArgs = GeoRadiusByMemberPrepareArgs(key, member, radius, unit, withCoords, withDist, withHash, count, asc); + + var to = new List(); + + if (!(withCoords || withDist || withHash)) + { + var members = (await SendExpectMultiDataAsync(token, cmdWithArgs).ConfigureAwait(false)).ToStringArray(); + foreach (var x in members) + { + to.Add(new RedisGeoResult { Member = x }); + } + } + else + { + var data = await SendExpectComplexResponseAsync(token, cmdWithArgs).ConfigureAwait(false); + GeoRadiusByMemberParseResult(unit, withCoords, withDist, withHash, to, data); + } + + return to; + } + + ValueTask IRedisNativeClientAsync.PublishAsync(string toChannel, byte[] message, CancellationToken token) + => SendExpectLongAsync(token, Commands.Publish, toChannel.ToUtf8Bytes(), message); + + ValueTask IRedisNativeClientAsync.SubscribeAsync(string[] toChannels, CancellationToken token) + { + if (toChannels.Length == 0) + throw new ArgumentNullException(nameof(toChannels)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.Subscribe, toChannels); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.UnSubscribeAsync(string[] fromChannels, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.UnSubscribe, fromChannels); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.PSubscribeAsync(string[] toChannelsMatchingPatterns, CancellationToken token) + { + if (toChannelsMatchingPatterns.Length == 0) + throw new ArgumentNullException(nameof(toChannelsMatchingPatterns)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.PSubscribe, toChannelsMatchingPatterns); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.PUnSubscribeAsync(string[] fromChannelsMatchingPatterns, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.PUnSubscribe, fromChannelsMatchingPatterns); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.ReceiveMessagesAsync(CancellationToken token) + => ReadMultiDataAsync(token); + + ValueTask IRedisNativeClientAsync.CreateSubscriptionAsync(CancellationToken token) + => new RedisSubscription(this).AsValueTaskResult(); + + ValueTask IRedisNativeClientAsync.BitCountAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.BitCount, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.DelAsync(params string[] keys) + => AsAsync().DelAsync(keys, default); + + ValueTask IRedisNativeClientAsync.SInterStoreAsync(string intoSetId, params string[] setIds) + => AsAsync().SInterStoreAsync(intoSetId, setIds, default); + + ValueTask IRedisNativeClientAsync.SUnionAsync(params string[] setIds) + => AsAsync().SUnionAsync(setIds, default); + + ValueTask IRedisNativeClientAsync.WatchAsync(params string[] keys) + => AsAsync().WatchAsync(keys, default); + + ValueTask IRedisNativeClientAsync.SubscribeAsync(params string[] toChannels) + => AsAsync().SubscribeAsync(toChannels, default); + + ValueTask IRedisNativeClientAsync.UnSubscribeAsync(params string[] toChannels) + => AsAsync().UnSubscribeAsync(toChannels, default); + + ValueTask IRedisNativeClientAsync.PSubscribeAsync(params string[] toChannelsMatchingPatterns) + => AsAsync().PSubscribeAsync(toChannelsMatchingPatterns, default); + + ValueTask IRedisNativeClientAsync.PUnSubscribeAsync(params string[] toChannelsMatchingPatterns) + => 
AsAsync().PUnSubscribeAsync(toChannelsMatchingPatterns, default); + + ValueTask IRedisNativeClientAsync.SInterAsync(params string[] setIds) + => AsAsync().SInterAsync(setIds, default); + + ValueTask IRedisNativeClientAsync.SDiffAsync(string fromSetId, params string[] withSetIds) + => AsAsync().SDiffAsync(fromSetId, withSetIds, default); + + ValueTask IRedisNativeClientAsync.SDiffStoreAsync(string intoSetId, string fromSetId, params string[] withSetIds) + => AsAsync().SDiffStoreAsync(intoSetId, fromSetId, withSetIds, default); + + ValueTask IRedisNativeClientAsync.ZUnionStoreAsync(string intoSetId, params string[] setIds) + => AsAsync().ZUnionStoreAsync(intoSetId, setIds, default); + + ValueTask IRedisNativeClientAsync.ZInterStoreAsync(string intoSetId, params string[] setIds) + => AsAsync().ZInterStoreAsync(intoSetId, setIds, default); + + ValueTask IRedisNativeClientAsync.EvalCommandAsync(string luaBody, int numberKeysInArgs, params byte[][] keys) + => AsAsync().EvalCommandAsync(luaBody, numberKeysInArgs, keys, default); + + ValueTask IRedisNativeClientAsync.EvalShaCommandAsync(string sha1, int numberKeysInArgs, params byte[][] keys) + => AsAsync().EvalShaCommandAsync(sha1, numberKeysInArgs, keys, default); + + ValueTask IRedisNativeClientAsync.EvalAsync(string luaBody, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalAsync(luaBody, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalShaAsync(string sha1, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalShaAsync(sha1, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalIntAsync(string luaBody, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalIntAsync(luaBody, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalShaIntAsync(string sha1, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalShaIntAsync(sha1, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalStrAsync(string luaBody, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalStrAsync(luaBody, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalShaStrAsync(string sha1, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalShaStrAsync(sha1, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.RawCommandAsync(params object[] cmdWithArgs) + => AsAsync().RawCommandAsync(cmdWithArgs, default); + + ValueTask IRedisNativeClientAsync.RawCommandAsync(params byte[][] cmdWithBinaryArgs) + => AsAsync().RawCommandAsync(cmdWithBinaryArgs, default); + + ValueTask IRedisNativeClientAsync.MGetAsync(params string[] keys) + => AsAsync().MGetAsync(keys, default); + + ValueTask IRedisNativeClientAsync.PfAddAsync(string key, params byte[][] elements) + => AsAsync().PfAddAsync(key, elements, default); + + ValueTask IRedisNativeClientAsync.HMGetAsync(string hashId, params byte[][] keysAndArgs) + => AsAsync().HMGetAsync(hashId, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.MGetAsync(params byte[][] keysAndArgs) + => AsAsync().MGetAsync(keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.SUnionStoreAsync(string intoSetId, params string[] setIds) + => AsAsync().SUnionStoreAsync(intoSetId, setIds, default); + + ValueTask IRedisNativeClientAsync.ScriptExistsAsync(params byte[][] sha1Refs) + => AsAsync().ScriptExistsAsync(sha1Refs, default); + + ValueTask IRedisNativeClientAsync.PfMergeAsync(string toKeyId, params string[] fromKeys) + => 
AsAsync().PfMergeAsync(toKeyId, fromKeys, default); + + ValueTask IRedisNativeClientAsync.GeoAddAsync(string key, params RedisGeo[] geoPoints) + => AsAsync().GeoAddAsync(key, geoPoints, default); + + ValueTask IRedisNativeClientAsync.GeoHashAsync(string key, params string[] members) + => AsAsync().GeoHashAsync(key, members, default); + + ValueTask> IRedisNativeClientAsync.GeoPosAsync(string key, params string[] members) + => AsAsync().GeoPosAsync(key, members, default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisNativeClient.cs b/src/ServiceStack.Redis/RedisNativeClient.cs index eeaf202a..220f0405 100644 --- a/src/ServiceStack.Redis/RedisNativeClient.cs +++ b/src/ServiceStack.Redis/RedisNativeClient.cs @@ -15,29 +15,31 @@ using System.IO; using System.Collections.Generic; using System.Linq; +using System.Net.Security; using System.Net.Sockets; +using System.Threading; using ServiceStack.Logging; -using ServiceStack.Redis; using ServiceStack.Redis.Pipeline; using ServiceStack.Text; +using System.Security.Authentication; namespace ServiceStack.Redis { /// /// This class contains all the common operations for the RedisClient. /// The client contains a 1:1 mapping of c# methods to redis operations of the same name. - /// - /// Not threadsafe use a pooled manager + /// + /// Not threadsafe, use a pooled manager! + /// All redis calls on a single instance write to the same Socket. + /// If used in multiple threads (or async Tasks) at the same time you will find + /// that commands are not executed properly by redis and ServiceStack won't be able to (json) serialize + /// the data that comes back. /// public partial class RedisNativeClient : IRedisNativeClient { private static readonly ILog log = LogManager.GetLogger(typeof(RedisNativeClient)); - public const int DefaultDb = 0; - public const int DefaultPort = 6379; - public const string DefaultHost = "localhost"; - internal const int Success = 1; internal const int OneGb = 1073741824; private readonly byte[] endData = new[] { (byte)'\r', (byte)'\n' }; @@ -45,48 +47,79 @@ public partial class RedisNativeClient private int clientPort; private string lastCommand; private SocketException lastSocketException; - public bool HadExceptions { get; protected set; } + + internal long deactivatedAtTicks; + public DateTime? DeactivatedAt + { + get => deactivatedAtTicks != 0 + ? new DateTime(Interlocked.Read(ref deactivatedAtTicks), DateTimeKind.Utc) + : (DateTime?)null; + set + { + var ticksValue = value?.Ticks ?? 0; + Interlocked.Exchange(ref deactivatedAtTicks, ticksValue); + } + } + + public bool HadExceptions => deactivatedAtTicks > 0; protected Socket socket; + [Obsolete("The direct stream is no longer directly available", true)] // API BREAKING CHANGE since exposed protected BufferedStream Bstream; + protected SslStream sslStream; + + private BufferedReader bufferedReader; private IRedisTransactionBase transaction; private IRedisPipelineShared pipeline; - private Dictionary info; + const int YES = 1; + const int NO = 0; + /// /// Used to manage connection pooling /// - internal bool Active { get; set; } - internal PooledRedisClientManager ClientManager { get; set; } + private int active; + internal bool Active + { + get => Interlocked.CompareExchange(ref active, 0, 0) == YES; + private set => Interlocked.Exchange(ref active, value ?
YES : NO); + } + + internal IHandleClientDispose ClientManager { get; set; } - internal int IdleTimeOutSecs = 240; //default on redis is 300 internal long LastConnectedAtTimestamp; - public int Id { get; set; } + public long Id { get; set; } public string Host { get; private set; } public int Port { get; private set; } + public bool Ssl { get; private set; } + public SslProtocols? SslProtocols { get; private set; } /// /// Gets or sets object key prefix. /// public string NamespacePrefix { get; set; } public int ConnectTimeout { get; set; } - public int RetryTimeout { get; set; } + private TimeSpan retryTimeout; + public int RetryTimeout + { + get => (int)retryTimeout.TotalMilliseconds; + set => retryTimeout = TimeSpan.FromMilliseconds(value); + } public int RetryCount { get; set; } public int SendTimeout { get; set; } - public int ReceiveTimeout { get; set; } + public int ReceiveTimeout { get; set; } public string Password { get; set; } + public string Client { get; set; } + public int IdleTimeOutSecs { get; set; } public Action ConnectionFilter { get; set; } internal IRedisTransactionBase Transaction { - get - { - return transaction; - } + get => transaction; set { if (value != null) @@ -95,13 +128,9 @@ internal IRedisTransactionBase Transaction } } - internal IRedisPipelineShared Pipeline { - get - { - return pipeline; - } + get => pipeline; set { if (value != null) @@ -110,94 +139,165 @@ internal IRedisPipelineShared Pipeline } } - public RedisNativeClient(string host) - : this(host, DefaultPort) {} + internal void EndPipeline() + { + ResetSendBuffer(); + + if (Pipeline != null) + { + Pipeline = null; + Interlocked.Increment(ref __requestsPerHour); + } + } + + public RedisNativeClient(string connectionString) + : this(connectionString.ToRedisEndpoint()) { } + + public RedisNativeClient(RedisEndpoint config) + { + Init(config); + } public RedisNativeClient(string host, int port) - : this(host, port, null) {} + : this(host, port, null) { } - public RedisNativeClient(string host, int port, string password = null, int db = DefaultDb) + public RedisNativeClient(string host, int port, string password = null, long db = RedisConfig.DefaultDb) { if (host == null) throw new ArgumentNullException("host"); - Host = host; - Port = port; - SendTimeout = -1; - ReceiveTimeout = -1; - Password = password; - Db = db; + Init(new RedisEndpoint(host, port, password, db)); + } + + private void Init(RedisEndpoint config) + { + Host = config.Host; + Port = config.Port; + ConnectTimeout = config.ConnectTimeout; + SendTimeout = config.SendTimeout; + ReceiveTimeout = config.ReceiveTimeout; + RetryTimeout = config.RetryTimeout; + Password = config.Password; + NamespacePrefix = config.NamespacePrefix; + Client = config.Client; + Db = config.Db; + Ssl = config.Ssl; + SslProtocols = config.SslProtocols; + IdleTimeOutSecs = config.IdleTimeOutSecs; + ServerVersionNumber = RedisConfig.AssumeServerVersion.GetValueOrDefault(); + LogPrefix = "#" + ClientId + " "; + JsConfig.InitStatics(); } public RedisNativeClient() - : this(DefaultHost, DefaultPort) {} - + : this(RedisConfig.DefaultHost, RedisConfig.DefaultPort) { } + #region Common Operations - int db; - public int Db + long db; + public long Db { - get - { - return db; - } + get => db; set { db = value; - SendExpectSuccess(Commands.Select, db.ToUtf8Bytes()); + + if (HasConnected) + { + ChangeDb(db); + } } } - public int DbSize + + public void ChangeDb(long db) { - get - { - return SendExpectInt(Commands.DbSize); - } + this.db = db; + SendExpectSuccess(Commands.Select, 
db.ToUtf8Bytes()); } + public long DbSize => SendExpectLong(Commands.DbSize); + public DateTime LastSave { get { - var t = SendExpectInt(Commands.LastSave); - return DateTimeExtensions.FromUnixTime(t); + var t = SendExpectLong(Commands.LastSave); + return t.FromUnixTime(); } } - public Dictionary Info + public Dictionary Info { - get - { - if (this.info == null) - { - var lines = SendExpectString(Commands.Info); - this.info = new Dictionary(); + get => ParseInfoResult(SendExpectString(Commands.Info)); + } - foreach (var line in lines - .Split(new[] { "\r\n" }, StringSplitOptions.RemoveEmptyEntries)) - { - var p = line.IndexOf(':'); - if (p == -1) continue; + private static Dictionary ParseInfoResult(string lines) + { + var info = new Dictionary(); - this.info.Add(line.Substring(0, p), line.Substring(p + 1)); - } - } - return this.info; + foreach (var line in lines + .Split(new[] { "\r\n" }, StringSplitOptions.RemoveEmptyEntries)) + { + var p = line.IndexOf(':'); + if (p == -1) continue; + + info[line.Substring(0, p)] = line.Substring(p + 1); } + + return info; } public string ServerVersion { get { - string version; - this.Info.TryGetValue("redis_version", out version); + this.Info.TryGetValue("redis_version", out var version); return version; } } + public RedisData RawCommand(params object[] cmdWithArgs) + { + return SendExpectComplexResponse(PrepareRawCommand(cmdWithArgs)); + } + + private static byte[][] PrepareRawCommand(object[] cmdWithArgs) + { + var byteArgs = new List(); + + foreach (var arg in cmdWithArgs) + { + if (arg == null) + { + byteArgs.Add(TypeConstants.EmptyByteArray); + continue; + } + + if (arg is byte[] bytes) + { + byteArgs.Add(bytes); + } + else if (arg.GetType().IsUserType()) + { + var json = arg.ToJson(); + byteArgs.Add(json.ToUtf8Bytes()); + } + else + { + var str = arg.ToString(); + byteArgs.Add(str.ToUtf8Bytes()); + } + } + return byteArgs.ToArray(); + } + + public RedisData RawCommand(params byte[][] cmdWithBinaryArgs) + { + return SendExpectComplexResponse(cmdWithBinaryArgs); + } + public bool Ping() { return SendExpectCode(Commands.Ping) == "PONG"; @@ -218,69 +318,82 @@ public void SlaveOfNoOne() SendExpectSuccess(Commands.SlaveOf, Commands.No, Commands.One); } - public byte[][] ConfigGet(string pattern) - { - return SendExpectMultiData(Commands.Config, Commands.Get, pattern.ToUtf8Bytes()); - } - - public void ConfigSet(string item, byte[] value) - { - SendExpectSuccess(Commands.Config, Commands.Set, item.ToUtf8Bytes(), value); - } - - public void ConfigResetStat() - { - SendExpectSuccess(Commands.Config, Commands.ResetStat); - } - - public byte[][] Time() - { - return SendExpectMultiData(Commands.Time); - } - - public void DebugSegfault() - { - SendExpectSuccess(Commands.Debug, Commands.Segfault); - } - - public byte[] Dump(string key) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectData(Commands.Dump); - } - - public byte[] Restore(string key, long expireMs, byte[] dumpValue) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectData(Commands.Restore, key.ToUtf8Bytes(), expireMs.ToUtf8Bytes(), dumpValue); - } - - public void Migrate(string host, int port, int destinationDb, long timeoutMs) - { - SendExpectSuccess(Commands.Migrate, host.ToUtf8Bytes(), port.ToUtf8Bytes(), destinationDb.ToUtf8Bytes(), timeoutMs.ToUtf8Bytes()); - } - - public bool Move(string key, int db) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.Move, key.ToUtf8Bytes(), 
db.ToUtf8Bytes()) == Success; - } - - public int ObjectIdleTime(string key) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.Object, Commands.IdleTime, key.ToUtf8Bytes()); - } - - public string Type(string key) + public byte[][] ConfigGet(string pattern) + { + return SendExpectMultiData(Commands.Config, Commands.Get, pattern.ToUtf8Bytes()); + } + + public void ConfigSet(string item, byte[] value) + { + SendExpectSuccess(Commands.Config, Commands.Set, item.ToUtf8Bytes(), value); + } + + public void ConfigResetStat() + { + SendExpectSuccess(Commands.Config, Commands.ResetStat); + } + + public void ConfigRewrite() + { + SendExpectSuccess(Commands.Config, Commands.Rewrite); + } + + public byte[][] Time() + { + return SendExpectMultiData(Commands.Time); + } + + public void DebugSegfault() + { + SendExpectSuccess(Commands.Debug, Commands.Segfault); + } + + public void DebugSleep(double durationSecs) + { + SendExpectSuccess(Commands.Debug, Commands.Sleep, durationSecs.ToUtf8Bytes()); + } + + public byte[] Dump(string key) + { + if (key == null) + throw new ArgumentNullException("key"); + + return SendExpectData(Commands.Dump, key.ToUtf8Bytes()); + } + + public byte[] Restore(string key, long expireMs, byte[] dumpValue) + { + if (key == null) + throw new ArgumentNullException("key"); + + return SendExpectData(Commands.Restore, key.ToUtf8Bytes(), expireMs.ToUtf8Bytes(), dumpValue); + } + + public void Migrate(string host, int port, string key, int destinationDb, long timeoutMs) + { + if (key == null) + throw new ArgumentNullException("key"); + + SendExpectSuccess(Commands.Migrate, host.ToUtf8Bytes(), port.ToUtf8Bytes(), key.ToUtf8Bytes(), destinationDb.ToUtf8Bytes(), timeoutMs.ToUtf8Bytes()); + } + + public bool Move(string key, int db) + { + if (key == null) + throw new ArgumentNullException("key"); + + return SendExpectLong(Commands.Move, key.ToUtf8Bytes(), db.ToUtf8Bytes()) == Success; + } + + public long ObjectIdleTime(string key) + { + if (key == null) + throw new ArgumentNullException("key"); + + return SendExpectLong(Commands.Object, Commands.IdleTime, key.ToUtf8Bytes()); + } + + public string Type(string key) { if (key == null) throw new ArgumentNullException("key"); @@ -289,8 +402,11 @@ public string Type(string key) } public RedisKeyType GetEntryType(string key) + => ParseEntryType(Type(key)); + + private protected RedisKeyType ParseEntryType(string type) { - switch (Type(key)) + switch (type) { case "none": return RedisKeyType.None; @@ -305,22 +421,22 @@ public RedisKeyType GetEntryType(string key) case "hash": return RedisKeyType.Hash; } - throw CreateResponseError("Invalid value"); + throw CreateResponseError($"Invalid Type '{type}'"); } - public int StrLen(string key) - { - if (key == null) - throw new ArgumentNullException("key"); + public long StrLen(string key) + { + if (key == null) + throw new ArgumentNullException("key"); - return SendExpectInt(Commands.StrLen, key.ToUtf8Bytes()); - } + return SendExpectLong(Commands.StrLen, key.ToUtf8Bytes()); + } - public void Set(string key, byte[] value) + public void Set(string key, byte[] value) { if (key == null) throw new ArgumentNullException("key"); - value = value ?? new byte[0]; + value = value ?? 
TypeConstants.EmptyByteArray; if (value.Length > OneGb) throw new ArgumentException("value exceeds 1G", "value"); @@ -328,76 +444,124 @@ public void Set(string key, byte[] value) SendExpectSuccess(Commands.Set, key.ToUtf8Bytes(), value); } + public void Set(string key, byte[] value, int expirySeconds, long expiryMs = 0) + { + Set(key.ToUtf8Bytes(), value, expirySeconds, expiryMs); + } + + public void Set(byte[] key, byte[] value, int expirySeconds, long expiryMs = 0) + { + if (key == null) + throw new ArgumentNullException("key"); + + value = value ?? TypeConstants.EmptyByteArray; + + if (value.Length > OneGb) + throw new ArgumentException("value exceeds 1G", "value"); + + if (expirySeconds > 0) + SendExpectSuccess(Commands.Set, key, value, Commands.Ex, expirySeconds.ToUtf8Bytes()); + else if (expiryMs > 0) + SendExpectSuccess(Commands.Set, key, value, Commands.Px, expiryMs.ToUtf8Bytes()); + else + SendExpectSuccess(Commands.Set, key, value); + } + + public bool Set(string key, byte[] value, bool exists, int expirySeconds = 0, long expiryMs = 0) + { + var entryExists = exists ? Commands.Xx : Commands.Nx; + + if (expirySeconds > 0) + return SendExpectString(Commands.Set, key.ToUtf8Bytes(), value, Commands.Ex, expirySeconds.ToUtf8Bytes(), entryExists) == OK; + if (expiryMs > 0) + return SendExpectString(Commands.Set, key.ToUtf8Bytes(), value, Commands.Px, expiryMs.ToUtf8Bytes(), entryExists) == OK; + + return SendExpectString(Commands.Set, key.ToUtf8Bytes(), value, entryExists) == OK; + } + public void SetEx(string key, int expireInSeconds, byte[] value) + { + SetEx(key.ToUtf8Bytes(), expireInSeconds, value); + } + + public void SetEx(byte[] key, int expireInSeconds, byte[] value) { if (key == null) throw new ArgumentNullException("key"); - value = value ?? new byte[0]; + value = value ?? TypeConstants.EmptyByteArray; if (value.Length > OneGb) throw new ArgumentException("value exceeds 1G", "value"); - SendExpectSuccess(Commands.SetEx, key.ToUtf8Bytes(), expireInSeconds.ToUtf8Bytes(), value); + SendExpectSuccess(Commands.SetEx, key, expireInSeconds.ToUtf8Bytes(), value); } - public bool Persist(string key) - { - if (key == null) - throw new ArgumentNullException("key"); + public bool Persist(string key) + { + if (key == null) + throw new ArgumentNullException("key"); - return SendExpectInt(Commands.Persist, key.ToUtf8Bytes()) == Success; - } + return SendExpectLong(Commands.Persist, key.ToUtf8Bytes()) == Success; + } - public void PSetEx(string key, long expireInMs, byte[] value) - { - if (key == null) - throw new ArgumentNullException("key"); + public void PSetEx(string key, long expireInMs, byte[] value) + { + if (key == null) + throw new ArgumentNullException("key"); - SendExpectSuccess(Commands.PSetEx, expireInMs.ToUtf8Bytes(), key.ToUtf8Bytes(), value); - } + SendExpectSuccess(Commands.PSetEx, key.ToUtf8Bytes(), expireInMs.ToUtf8Bytes(), value); + } - public int SetNX(string key, byte[] value) + public long SetNX(string key, byte[] value) { if (key == null) throw new ArgumentNullException("key"); - value = value ?? new byte[0]; + value = value ?? 
TypeConstants.EmptyByteArray; if (value.Length > OneGb) throw new ArgumentException("value exceeds 1G", "value"); - return SendExpectInt(Commands.SetNx, key.ToUtf8Bytes(), value); + return SendExpectLong(Commands.SetNx, key.ToUtf8Bytes(), value); } - public void MSet(byte[][] keys, byte[][] values) - { - var keysAndValues = MergeCommandWithKeysAndValues(Commands.MSet, keys, values); + public void MSet(byte[][] keys, byte[][] values) + { + var keysAndValues = MergeCommandWithKeysAndValues(Commands.MSet, keys, values); - SendExpectSuccess(keysAndValues); - } + SendExpectSuccess(keysAndValues); + } - public void MSet(string[] keys, byte[][] values) - { - MSet(keys.ToMultiByteArray(), values); - } + public void MSet(string[] keys, byte[][] values) + { + MSet(keys.ToMultiByteArray(), values); + } - public bool MSetNx(byte[][] keys, byte[][] values) - { - var keysAndValues = MergeCommandWithKeysAndValues(Commands.MSet, keys, values); + public bool MSetNx(byte[][] keys, byte[][] values) + { + var keysAndValues = MergeCommandWithKeysAndValues(Commands.MSet, keys, values); - return SendExpectInt(keysAndValues) == Success; - } + return SendExpectLong(keysAndValues) == Success; + } - public bool MSetNx(string[] keys, byte[][] values) - { - return MSetNx(keys.ToMultiByteArray(), values); - } + public bool MSetNx(string[] keys, byte[][] values) + { + return MSetNx(keys.ToMultiByteArray(), values); + } - public byte[] Get(string key) + public byte[] Get(string key) { return GetBytes(key); } - public object[] Slowlog(int ? top) + public byte[] Get(byte[] key) + { + if (key == null) + throw new ArgumentNullException("key"); + + return SendExpectData(Commands.Get, key); + } + + public object[] Slowlog(int? top) { if (top.HasValue) return SendExpectDeeplyNestedMultiData(Commands.Slowlog, Commands.Get, top.Value.ToUtf8Bytes()); @@ -419,41 +583,50 @@ public byte[] GetBytes(string key) } public byte[] GetSet(string key, byte[] value) + { + GetSetAssertArgs(key, ref value); + return SendExpectData(Commands.GetSet, key.ToUtf8Bytes(), value); + } + + private static void GetSetAssertArgs(string key, ref byte[] value) { if (key == null) throw new ArgumentNullException("key"); - value = value ?? new byte[0]; + value = value ?? 
TypeConstants.EmptyByteArray; if (value.Length > OneGb) throw new ArgumentException("value exceeds 1G", "value"); - - return SendExpectData(Commands.GetSet, key.ToUtf8Bytes(), value); } - public int Exists(string key) + public long Exists(string key) { if (key == null) throw new ArgumentNullException("key"); - return SendExpectInt(Commands.Exists, key.ToUtf8Bytes()); + return SendExpectLong(Commands.Exists, key.ToUtf8Bytes()); + } + + public long Del(string key) + { + return Del(key.ToUtf8Bytes()); } - public int Del(string key) + public long Del(byte[] key) { if (key == null) throw new ArgumentNullException("key"); - return SendExpectInt(Commands.Del, key.ToUtf8Bytes()); + return SendExpectLong(Commands.Del, key); } - public int Del(params string[] keys) + public long Del(params string[] keys) { if (keys == null) throw new ArgumentNullException("keys"); var cmdWithArgs = MergeCommandWithArgs(Commands.Del, keys); - return SendExpectInt(cmdWithArgs); + return SendExpectLong(cmdWithArgs); } public long Incr(string key) @@ -472,15 +645,22 @@ public long IncrBy(string key, int count) return SendExpectLong(Commands.IncrBy, key.ToUtf8Bytes(), count.ToUtf8Bytes()); } - public double IncrByFloat(string key, double incrBy) - { - if (key == null) - throw new ArgumentNullException("key"); + public long IncrBy(string key, long count) + { + if (key == null) + throw new ArgumentNullException("key"); + return SendExpectLong(Commands.IncrBy, key.ToUtf8Bytes(), count.ToUtf8Bytes()); + } + + public double IncrByFloat(string key, double incrBy) + { + if (key == null) + throw new ArgumentNullException("key"); - return SendExpectDouble(Commands.IncrBy, key.ToUtf8Bytes(), incrBy.ToUtf8Bytes()); - } + return SendExpectDouble(Commands.IncrByFloat, key.ToUtf8Bytes(), incrBy.ToUtf8Bytes()); + } - public long Decr(string key) + public long Decr(string key) { if (key == null) throw new ArgumentNullException("key"); @@ -496,55 +676,55 @@ public long DecrBy(string key, int count) return SendExpectLong(Commands.DecrBy, key.ToUtf8Bytes(), count.ToUtf8Bytes()); } - public int Append(string key, byte[] value) + public long Append(string key, byte[] value) { if (key == null) throw new ArgumentNullException("key"); - return SendExpectInt(Commands.Append, key.ToUtf8Bytes(), value); + return SendExpectLong(Commands.Append, key.ToUtf8Bytes(), value); } - - public byte[] Substr(string key, int fromIndex, int toIndex) + + public byte[] GetRange(string key, int fromIndex, int toIndex) { if (key == null) throw new ArgumentNullException("key"); - return SendExpectData(Commands.Substr, key.ToUtf8Bytes(), fromIndex.ToUtf8Bytes(), toIndex.ToUtf8Bytes()); + return SendExpectData(Commands.GetRange, key.ToUtf8Bytes(), fromIndex.ToUtf8Bytes(), toIndex.ToUtf8Bytes()); } - public byte[] GetRange(string key, int fromIndex, int toIndex) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectData(Commands.GetRange, key.ToUtf8Bytes(), fromIndex.ToUtf8Bytes(), toIndex.ToUtf8Bytes()); - } - - public int SetRange(string key, int offset, byte[] value) - { - if (key == null) - throw new ArgumentNullException("key"); + public long SetRange(string key, int offset, byte[] value) + { + if (key == null) + throw new ArgumentNullException("key"); - return SendExpectInt(Commands.SetRange, key.ToUtf8Bytes(), offset.ToUtf8Bytes(), value); - } + return SendExpectLong(Commands.SetRange, key.ToUtf8Bytes(), offset.ToUtf8Bytes(), value); + } - public int GetBit(string key, int offset) + public long GetBit(string key, int offset) { if 
(key == null) throw new ArgumentNullException("key"); - return SendExpectInt(Commands.GetBit, key.ToUtf8Bytes(), offset.ToUtf8Bytes()); + return SendExpectLong(Commands.GetBit, key.ToUtf8Bytes(), offset.ToUtf8Bytes()); } - public int SetBit(string key, int offset, int value) + public long SetBit(string key, int offset, int value) { if (key == null) throw new ArgumentNullException("key"); if (value > 1 || value < 0) - throw new ArgumentException("value is out of range"); + throw new ArgumentException("value is out of range"); + + return SendExpectLong(Commands.SetBit, key.ToUtf8Bytes(), offset.ToUtf8Bytes(), value.ToUtf8Bytes()); + } + + public long BitCount(string key) + { + if (key == null) + throw new ArgumentNullException("key"); - return SendExpectInt(Commands.SetBit, key.ToUtf8Bytes(), offset.ToUtf8Bytes(), value.ToUtf8Bytes()); + return SendExpectLong(Commands.BitCount, key.ToUtf8Bytes()); } public string RandomKey() @@ -553,74 +733,84 @@ public string RandomKey() } public void Rename(string oldKeyname, string newKeyname) + { + CheckRenameKeys(oldKeyname, newKeyname); + SendExpectSuccess(Commands.Rename, oldKeyname.ToUtf8Bytes(), newKeyname.ToUtf8Bytes()); + } + + private protected static void CheckRenameKeys(string oldKeyname, string newKeyname) { if (oldKeyname == null) throw new ArgumentNullException("oldKeyname"); if (newKeyname == null) throw new ArgumentNullException("newKeyname"); - - SendExpectSuccess(Commands.Rename, oldKeyname.ToUtf8Bytes(), newKeyname.ToUtf8Bytes()); } - public bool RenameNx(string oldKeyname, string newKeyname) - { - if (oldKeyname == null) - throw new ArgumentNullException("oldKeyname"); - if (newKeyname == null) - throw new ArgumentNullException("newKeyname"); + public bool RenameNx(string oldKeyname, string newKeyname) + { + CheckRenameKeys(oldKeyname, newKeyname); + return SendExpectLong(Commands.RenameNx, oldKeyname.ToUtf8Bytes(), newKeyname.ToUtf8Bytes()) == Success; + } - return SendExpectInt(Commands.RenameNx, oldKeyname.ToUtf8Bytes(), newKeyname.ToUtf8Bytes()) == Success; - } + public bool Expire(string key, int seconds) + { + return Expire(key.ToUtf8Bytes(), seconds); + } - public bool Expire(string key, int seconds) + public bool Expire(byte[] key, int seconds) { if (key == null) throw new ArgumentNullException("key"); - return SendExpectInt(Commands.Expire, key.ToUtf8Bytes(), seconds.ToUtf8Bytes()) == Success; + return SendExpectLong(Commands.Expire, key, seconds.ToUtf8Bytes()) == Success; + } + + public bool PExpire(string key, long ttlMs) + { + return PExpire(key.ToUtf8Bytes(), ttlMs); } - public bool PExpire(string key, long ttlMs) - { - if (key == null) - throw new ArgumentNullException("key"); + public bool PExpire(byte[] key, long ttlMs) + { + if (key == null) + throw new ArgumentNullException("key"); - return SendExpectInt(Commands.PExpire, key.ToUtf8Bytes(), ttlMs.ToUtf8Bytes()) == Success; - } + return SendExpectLong(Commands.PExpire, key, ttlMs.ToUtf8Bytes()) == Success; + } - public bool ExpireAt(string key, long unixTime) + public bool ExpireAt(string key, long unixTime) { if (key == null) throw new ArgumentNullException("key"); - return SendExpectInt(Commands.ExpireAt, key.ToUtf8Bytes(), unixTime.ToUtf8Bytes()) == Success; + return SendExpectLong(Commands.ExpireAt, key.ToUtf8Bytes(), unixTime.ToUtf8Bytes()) == Success; } - public bool PExpireAt(string key, long unixTimeMs) - { - if (key == null) - throw new ArgumentNullException("key"); + public bool PExpireAt(string key, long unixTimeMs) + { + if (key == null) + throw new 
ArgumentNullException("key"); - return SendExpectInt(Commands.PExpireAt, key.ToUtf8Bytes(), unixTimeMs.ToUtf8Bytes()) == Success; - } + return SendExpectLong(Commands.PExpireAt, key.ToUtf8Bytes(), unixTimeMs.ToUtf8Bytes()) == Success; + } - public int Ttl(string key) + public long Ttl(string key) { if (key == null) throw new ArgumentNullException("key"); - return SendExpectInt(Commands.Ttl, key.ToUtf8Bytes()); + return SendExpectLong(Commands.Ttl, key.ToUtf8Bytes()); } - public long PTtl(string key) - { - if (key == null) - throw new ArgumentNullException("key"); + public long PTtl(string key) + { + if (key == null) + throw new ArgumentNullException("key"); - return SendExpectLong(Commands.PTtl, key.ToUtf8Bytes()); - } + return SendExpectLong(Commands.PTtl, key.ToUtf8Bytes()); + } - public void Save() + public void Save() { SendExpectSuccess(Commands.Save); } @@ -637,7 +827,12 @@ public void BgSave() public void Shutdown() { - SendCommand(Commands.Shutdown); + SendWithoutRead(Commands.Shutdown); + } + + public void ShutdownNoSave() + { + SendWithoutRead(Commands.Shutdown, Commands.NoSave); } public void BgRewriteAof() @@ -647,7 +842,7 @@ public void BgRewriteAof() public void Quit() { - SendCommand(Commands.Quit); + SendWithoutRead(Commands.Quit); } public void FlushDb() @@ -660,32 +855,113 @@ public void FlushAll() SendExpectSuccess(Commands.FlushAll); } - public byte[][] Keys(string pattern) + public RedisText Role() { - if (pattern == null) - throw new ArgumentNullException("pattern"); + return SendExpectComplexResponse(Commands.Role).ToRedisText(); + } - return SendExpectMultiData(Commands.Keys, pattern.ToUtf8Bytes()); + public string ClientGetName() + { + return SendExpectString(Commands.Client, Commands.GetName); } - public byte[][] MGet(params byte[][] keys) + public void ClientSetName(string name) { - if (keys == null) - throw new ArgumentNullException("keys"); - if (keys.Length == 0) - throw new ArgumentException("keys"); + ClientValidateName(name); + SendExpectSuccess(Commands.Client, Commands.SetName, name.ToUtf8Bytes()); + } - var cmdWithArgs = MergeCommandWithArgs(Commands.MGet, keys); + private static void ClientValidateName(string name) + { + if (string.IsNullOrEmpty(name)) + throw new ArgumentException("Name cannot be null or empty"); - return SendExpectMultiData(cmdWithArgs); + if (name.Contains(" ")) + throw new ArgumentException("Name cannot contain spaces"); } - public byte[][] MGet(params string[] keys) + public void ClientPause(int timeOutMs) { - if (keys == null) - throw new ArgumentNullException("keys"); - if (keys.Length == 0) - throw new ArgumentException("keys"); + SendExpectSuccess(Commands.Client, Commands.Pause, timeOutMs.ToUtf8Bytes()); + } + + public byte[] ClientList() + { + return SendExpectData(Commands.Client, Commands.List); + } + + public void ClientKill(string clientAddr) + { + SendExpectSuccess(Commands.Client, Commands.Kill, clientAddr.ToUtf8Bytes()); + } + + public long ClientKill(string addr = null, string id = null, string type = null, string skipMe = null) + { + return SendExpectLong(ClientKillPrepareArgs(addr, id, type, skipMe)); + } + + static byte[][] ClientKillPrepareArgs(string addr, string id, string type, string skipMe) + { + var cmdWithArgs = new List + { + Commands.Client, Commands.Kill, + }; + + if (addr != null) + { + cmdWithArgs.Add(Commands.Addr); + cmdWithArgs.Add(addr.ToUtf8Bytes()); + } + + if (id != null) + { + cmdWithArgs.Add(Commands.Id); + cmdWithArgs.Add(id.ToUtf8Bytes()); + } + + if (type != null) + { + 
cmdWithArgs.Add(Commands.Type); + cmdWithArgs.Add(type.ToUtf8Bytes()); + } + + if (skipMe != null) + { + cmdWithArgs.Add(Commands.SkipMe); + cmdWithArgs.Add(skipMe.ToUtf8Bytes()); + } + return cmdWithArgs.ToArray(); + } + + public byte[][] Keys(string pattern) + { + if (pattern == null) + throw new ArgumentNullException("pattern"); + + return SendExpectMultiData(Commands.Keys, pattern.ToUtf8Bytes()); + } + + public byte[][] MGet(params byte[][] keys) + { + return SendExpectMultiData(MGetPrepareArgs(keys)); + } + + private static byte[][] MGetPrepareArgs(byte[][] keys) + { + if (keys == null) + throw new ArgumentNullException("keys"); + if (keys.Length == 0) + throw new ArgumentException("keys"); + + return MergeCommandWithArgs(Commands.MGet, keys); + } + + public byte[][] MGet(params string[] keys) + { + if (keys == null) + throw new ArgumentNullException("keys"); + if (keys.Length == 0) + throw new ArgumentException("keys"); var cmdWithArgs = MergeCommandWithArgs(Commands.MGet, keys); @@ -704,17 +980,18 @@ public void Watch(params string[] keys) SendExpectCode(cmdWithArgs); } + public void UnWatch() { SendExpectCode(Commands.UnWatch); } + internal void Multi() { //make sure socket is connected. Otherwise, fetch of server info will interfere //with pipeline AssertConnectedSocket(); - if (!SendCommand(Commands.Multi)) - throw CreateConnectionError(); + SendWithoutRead(Commands.Multi); } /// @@ -723,9 +1000,7 @@ internal void Multi() /// Number of results internal void Exec() { - if (!SendCommand(Commands.Exec)) - throw CreateConnectionError(); - + SendWithoutRead(Commands.Exec); } internal void Discard() @@ -733,6 +1008,101 @@ internal void Discard() SendExpectSuccess(Commands.Discard); } + public ScanResult Scan(ulong cursor, int count = 10, string match = null) + { + if (match == null) + return SendExpectScanResult(Commands.Scan, cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + + return SendExpectScanResult(Commands.Scan, cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + public ScanResult SScan(string setId, ulong cursor, int count = 10, string match = null) + { + if (match == null) + { + return SendExpectScanResult(Commands.SScan, setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + return SendExpectScanResult(Commands.SScan, setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + public ScanResult ZScan(string setId, ulong cursor, int count = 10, string match = null) + { + if (match == null) + { + return SendExpectScanResult(Commands.ZScan, setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + return SendExpectScanResult(Commands.ZScan, setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + public ScanResult HScan(string hashId, ulong cursor, int count = 10, string match = null) + { + if (match == null) + { + return SendExpectScanResult(Commands.HScan, hashId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + return SendExpectScanResult(Commands.HScan, hashId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + + internal ScanResult SendExpectScanResult(byte[] cmd, params byte[][] args) + { + var cmdWithArgs = MergeCommandWithArgs(cmd, args); + var multiData = 
SendExpectDeeplyNestedMultiData(cmdWithArgs); + return ParseScanResult(multiData); + } + internal static ScanResult ParseScanResult(object[] multiData) + { + var counterBytes = (byte[])multiData[0]; + + var ret = new ScanResult + { + Cursor = ulong.Parse(counterBytes.FromUtf8Bytes()), + Results = new List() + }; + var keysBytes = (object[])multiData[1]; + + foreach (var keyBytes in keysBytes) + { + ret.Results.Add((byte[])keyBytes); + } + + return ret; + } + + public bool PfAdd(string key, params byte[][] elements) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.PfAdd, key.ToUtf8Bytes(), elements); + return SendExpectLong(cmdWithArgs) == 1; + } + + public long PfCount(string key) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.PfCount, key.ToUtf8Bytes()); + return SendExpectLong(cmdWithArgs); + } + + public void PfMerge(string toKeyId, params string[] fromKeys) + { + var fromKeyBytes = fromKeys.Map(x => x.ToUtf8Bytes()).ToArray(); + var cmdWithArgs = MergeCommandWithArgs(Commands.PfMerge, toKeyId.ToUtf8Bytes(), fromKeyBytes); + SendExpectSuccess(cmdWithArgs); + } + #endregion @@ -743,18 +1113,44 @@ public byte[][] SMembers(string setId) return SendExpectMultiData(Commands.SMembers, setId.ToUtf8Bytes()); } - public int SAdd(string setId, byte[] value) + public long SAdd(string setId, byte[] value) { AssertSetIdAndValue(setId, value); - return SendExpectInt(Commands.SAdd, setId.ToUtf8Bytes(), value); + return SendExpectLong(Commands.SAdd, setId.ToUtf8Bytes(), value); } - public int SRem(string setId, byte[] value) + public long SAdd(string setId, byte[][] values) + { + if (setId == null) + throw new ArgumentNullException("setId"); + if (values == null) + throw new ArgumentNullException("values"); + if (values.Length == 0) + throw new ArgumentException("values"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SAdd, setId.ToUtf8Bytes(), values); + return SendExpectLong(cmdWithArgs); + } + + public long SRem(string setId, byte[] value) { AssertSetIdAndValue(setId, value); - return SendExpectInt(Commands.SRem, setId.ToUtf8Bytes(), value); + return SendExpectLong(Commands.SRem, setId.ToUtf8Bytes(), value); + } + + public long SRem(string setId, byte[][] values) + { + if (setId == null) + throw new ArgumentNullException("setId"); + if (values == null) + throw new ArgumentNullException("values"); + if (values.Length == 0) + throw new ArgumentException("values"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SRem, setId.ToUtf8Bytes(), values); + return SendExpectLong(cmdWithArgs); } public byte[] SPop(string setId) @@ -765,6 +1161,14 @@ public byte[] SPop(string setId) return SendExpectData(Commands.SPop, setId.ToUtf8Bytes()); } + public byte[][] SPop(string setId, int count) + { + if (setId == null) + throw new ArgumentNullException("setId"); + + return SendExpectMultiData(Commands.SPop, setId.ToUtf8Bytes(), count.ToUtf8Bytes()); + } + public void SMove(string fromSetId, string toSetId, byte[] value) { if (fromSetId == null) @@ -775,20 +1179,20 @@ public void SMove(string fromSetId, string toSetId, byte[] value) SendExpectSuccess(Commands.SMove, fromSetId.ToUtf8Bytes(), toSetId.ToUtf8Bytes(), value); } - public int SCard(string setId) + public long SCard(string setId) { if (setId == null) throw new ArgumentNullException("setId"); - return SendExpectInt(Commands.SCard, setId.ToUtf8Bytes()); + return SendExpectLong(Commands.SCard, setId.ToUtf8Bytes()); } - public int SIsMember(string setId, byte[] value) + public long SIsMember(string setId, byte[] value) { if (setId == null) 
throw new ArgumentNullException("setId"); - return SendExpectInt(Commands.SIsMember, setId.ToUtf8Bytes(), value); + return SendExpectLong(Commands.SIsMember, setId.ToUtf8Bytes(), value); } public byte[][] SInter(params string[] setIds) @@ -845,6 +1249,11 @@ public byte[] SRandMember(string setId) return SendExpectData(Commands.SRandMember, setId.ToUtf8Bytes()); } + public byte[][] SRandMember(string setId, int count) + { + return SendExpectMultiData(Commands.SRandMember, setId.ToUtf8Bytes(), count.ToUtf8Bytes()); + } + #endregion @@ -859,11 +1268,16 @@ public byte[][] LRange(string listId, int startingFrom, int endingAt) } public byte[][] Sort(string listOrSetId, SortOptions sortOptions) + { + return SendExpectMultiData(SortPrepareArgs(listOrSetId, sortOptions)); + } + + private static byte[][] SortPrepareArgs(string listOrSetId, SortOptions sortOptions) { var cmdWithArgs = new List - { - Commands.Sort, listOrSetId.ToUtf8Bytes() - }; + { + Commands.Sort, listOrSetId.ToUtf8Bytes() + }; if (sortOptions.SortPattern != null) { @@ -899,35 +1313,64 @@ public byte[][] Sort(string listOrSetId, SortOptions sortOptions) cmdWithArgs.Add(Commands.Store); cmdWithArgs.Add(sortOptions.StoreAtKey.ToUtf8Bytes()); } - - return SendExpectMultiData(cmdWithArgs.ToArray()); + return cmdWithArgs.ToArray(); } - public int RPush(string listId, byte[] value) + public long RPush(string listId, byte[] value) { AssertListIdAndValue(listId, value); - return SendExpectInt(Commands.RPush, listId.ToUtf8Bytes(), value); + return SendExpectLong(Commands.RPush, listId.ToUtf8Bytes(), value); } - public int RPushX(string listId, byte[] value) - { - throw new NotImplementedException(); - } + public long RPush(string listId, byte[][] values) + { + if (listId == null) + throw new ArgumentNullException("listId"); + if (values == null) + throw new ArgumentNullException("values"); + if (values.Length == 0) + throw new ArgumentException("values"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.RPush, listId.ToUtf8Bytes(), values); + return SendExpectLong(cmdWithArgs); + } - public int LPush(string listId, byte[] value) + public long RPushX(string listId, byte[] value) { AssertListIdAndValue(listId, value); - return SendExpectInt(Commands.LPush, listId.ToUtf8Bytes(), value); + return SendExpectLong(Commands.RPushX, listId.ToUtf8Bytes(), value); } - public int LPushX(string listId, byte[] value) - { - throw new NotImplementedException(); - } + public long LPush(string listId, byte[] value) + { + AssertListIdAndValue(listId, value); - public void LTrim(string listId, int keepStartingFrom, int keepEndingAt) + return SendExpectLong(Commands.LPush, listId.ToUtf8Bytes(), value); + } + + public long LPush(string listId, byte[][] values) + { + if (listId == null) + throw new ArgumentNullException("listId"); + if (values == null) + throw new ArgumentNullException("values"); + if (values.Length == 0) + throw new ArgumentException("values"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.LPush, listId.ToUtf8Bytes(), values); + return SendExpectLong(cmdWithArgs); + } + + public long LPushX(string listId, byte[] value) + { + AssertListIdAndValue(listId, value); + + return SendExpectLong(Commands.LPushX, listId.ToUtf8Bytes(), value); + } + + public void LTrim(string listId, int keepStartingFrom, int keepEndingAt) { if (listId == null) throw new ArgumentNullException("listId"); @@ -935,20 +1378,20 @@ public void LTrim(string listId, int keepStartingFrom, int keepEndingAt) SendExpectSuccess(Commands.LTrim, listId.ToUtf8Bytes(), 
keepStartingFrom.ToUtf8Bytes(), keepEndingAt.ToUtf8Bytes()); } - public int LRem(string listId, int removeNoOfMatches, byte[] value) + public long LRem(string listId, int removeNoOfMatches, byte[] value) { if (listId == null) throw new ArgumentNullException("listId"); - return SendExpectInt(Commands.LRem, listId.ToUtf8Bytes(), removeNoOfMatches.ToUtf8Bytes(), value); + return SendExpectLong(Commands.LRem, listId.ToUtf8Bytes(), removeNoOfMatches.ToUtf8Bytes(), value); } - public int LLen(string listId) + public long LLen(string listId) { if (listId == null) throw new ArgumentNullException("listId"); - return SendExpectInt(Commands.LLen, listId.ToUtf8Bytes()); + return SendExpectLong(Commands.LLen, listId.ToUtf8Bytes()); } public byte[] LIndex(string listId, int listIndex) @@ -1005,16 +1448,15 @@ public byte[][] BLPop(string[] listIds, int timeOutSecs) { if (listIds == null) throw new ArgumentNullException("listIds"); - var args = new List(); - args.Add(Commands.BLPop); + var args = new List { Commands.BLPop }; args.AddRange(listIds.Select(listId => listId.ToUtf8Bytes())); - args.Add(timeOutSecs.ToUtf8Bytes()); + args.Add(timeOutSecs.ToUtf8Bytes()); return SendExpectMultiData(args.ToArray()); } public byte[] BLPopValue(string listId, int timeOutSecs) { - var blockingResponse = BLPop(new[]{listId}, timeOutSecs); + var blockingResponse = BLPop(new[] { listId }, timeOutSecs); return blockingResponse.Length == 0 ? null : blockingResponse[1]; @@ -1040,16 +1482,15 @@ public byte[][] BRPop(string[] listIds, int timeOutSecs) { if (listIds == null) throw new ArgumentNullException("listIds"); - var args = new List(); - args.Add(Commands.BRPop); + var args = new List { Commands.BRPop }; args.AddRange(listIds.Select(listId => listId.ToUtf8Bytes())); - args.Add(timeOutSecs.ToUtf8Bytes()); + args.Add(timeOutSecs.ToUtf8Bytes()); return SendExpectMultiData(args.ToArray()); } public byte[] BRPopValue(string listId, int timeOutSecs) { - var blockingResponse = BRPop(new[]{listId}, timeOutSecs); + var blockingResponse = BRPop(new[] { listId }, timeOutSecs); return blockingResponse.Length == 0 ? null : blockingResponse[1]; @@ -1066,9 +1507,9 @@ public byte[][] BRPopValue(string[] listIds, int timeOutSecs) public byte[] RPopLPush(string fromListId, string toListId) { if (fromListId == null) - throw new ArgumentNullException("fromListId"); + throw new ArgumentNullException(nameof(fromListId)); if (toListId == null) - throw new ArgumentNullException("toListId"); + throw new ArgumentNullException(nameof(toListId)); return SendExpectData(Commands.RPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes()); } @@ -1076,16 +1517,86 @@ public byte[] RPopLPush(string fromListId, string toListId) public byte[] BRPopLPush(string fromListId, string toListId, int timeOutSecs) { if (fromListId == null) - throw new ArgumentNullException("fromListId"); + throw new ArgumentNullException(nameof(fromListId)); if (toListId == null) - throw new ArgumentNullException("toListId"); + throw new ArgumentNullException(nameof(toListId)); - byte[][] result= SendExpectMultiData(Commands.BRPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); + byte[][] result = SendExpectMultiData(Commands.BRPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); return result.Length == 0 ? 
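BLPOP/BRPOP reply with a two-element multi-bulk of [listId, value], which is why BLPopValue/BRPopValue above return blockingResponse[1] and null on timeout. A sketch (key and payload are illustrative, `redis` as before):

```csharp
redis.RPush("queue:emails", "hi@example.org".ToUtf8Bytes());
byte[] raw   = redis.BLPopValue("queue:emails", timeOutSecs: 5);  // blocks up to 5 seconds
string value = raw?.FromUtf8Bytes();                              // "hi@example.org", or null on timeout
```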
null : result[1]; } #endregion + #region Sentinel + + public List> SentinelMasters() + { + var args = new List + { + Commands.Sentinel, + Commands.Masters, + }; + return SendExpectStringDictionaryList(args.ToArray()); + } + + public Dictionary SentinelMaster(string masterName) + { + var args = new List + { + Commands.Sentinel, + Commands.Master, + masterName.ToUtf8Bytes(), + }; + var results = SendExpectComplexResponse(args.ToArray()); + return ToDictionary(results); + } + + public List> SentinelSentinels(string masterName) + { + var args = new List + { + Commands.Sentinel, + Commands.Sentinels, + masterName.ToUtf8Bytes(), + }; + return SendExpectStringDictionaryList(args.ToArray()); + } + + public List> SentinelSlaves(string masterName) + { + var args = new List + { + Commands.Sentinel, + Commands.Slaves, + masterName.ToUtf8Bytes(), + }; + return SendExpectStringDictionaryList(args.ToArray()); + } + + public List SentinelGetMasterAddrByName(string masterName) + { + var args = new List + { + Commands.Sentinel, + Commands.GetMasterAddrByName, + masterName.ToUtf8Bytes(), + }; + return SendExpectMultiData(args.ToArray()).ToStringList(); + } + + public void SentinelFailover(string masterName) + { + var args = new List + { + Commands.Sentinel, + Commands.Failover, + masterName.ToUtf8Bytes(), + }; + + SendExpectSuccess(args.ToArray()); + } + + #endregion #region Sorted Set Operations @@ -1097,71 +1608,129 @@ private static void AssertSetIdAndValue(string setId, byte[] value) throw new ArgumentNullException("value"); } - public int ZAdd(string setId, double score, byte[] value) - { - AssertSetIdAndValue(setId, value); + public long ZAdd(string setId, double score, byte[] value) + { + AssertSetIdAndValue(setId, value); - return SendExpectInt(Commands.ZAdd, setId.ToUtf8Bytes(), score.ToFastUtf8Bytes(), value); - } + return SendExpectLong(Commands.ZAdd, setId.ToUtf8Bytes(), score.ToFastUtf8Bytes(), value); + } - public int ZAdd(string setId, long score, byte[] value) - { - AssertSetIdAndValue(setId, value); + public long ZAdd(string setId, long score, byte[] value) + { + AssertSetIdAndValue(setId, value); - return SendExpectInt(Commands.ZAdd, setId.ToUtf8Bytes(), score.ToUtf8Bytes(), value); - } + return SendExpectLong(Commands.ZAdd, setId.ToUtf8Bytes(), score.ToUtf8Bytes(), value); + } - public int ZRem(string setId, byte[] value) + public long ZAdd(string setId, List> pairs) + { + if (setId == null) + throw new ArgumentNullException("setId"); + if (pairs == null) + throw new ArgumentNullException("pairs"); + if (pairs.Count == 0) + throw new ArgumentOutOfRangeException("pairs"); + + var mergedBytes = new byte[2 + pairs.Count * 2][]; + mergedBytes[0] = Commands.ZAdd; + mergedBytes[1] = setId.ToUtf8Bytes(); + for (var i = 0; i < pairs.Count; i++) + { + mergedBytes[i * 2 + 2] = pairs[i].Value.ToFastUtf8Bytes(); + mergedBytes[i * 2 + 3] = pairs[i].Key; + } + return SendExpectLong(mergedBytes); + } + + public long ZAdd(string setId, List> pairs) + { + if (setId == null) + throw new ArgumentNullException("setId"); + if (pairs == null) + throw new ArgumentNullException("pairs"); + if (pairs.Count == 0) + throw new ArgumentOutOfRangeException("pairs"); + + var mergedBytes = new byte[2 + pairs.Count * 2][]; + mergedBytes[0] = Commands.ZAdd; + mergedBytes[1] = setId.ToUtf8Bytes(); + for (var i = 0; i < pairs.Count; i++) + { + mergedBytes[i * 2 + 2] = pairs[i].Value.ToUtf8Bytes(); + mergedBytes[i * 2 + 3] = pairs[i].Key; + } + return SendExpectLong(mergedBytes); + } + + public long ZRem(string setId, byte[] 
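The new Sentinel region wraps the raw SENTINEL subcommands. SENTINEL get-master-addr-by-name replies with a two-element array of host and port, so the wrapper returns them as a list of strings. A sketch, assuming a client pointed at a sentinel node (26379 is the conventional sentinel port):

```csharp
var sentinel = new RedisNativeClient("localhost", 26379);
var addr = sentinel.SentinelGetMasterAddrByName("mymaster");
var masterHost = addr[0];                 // e.g. "127.0.0.1"
var masterPort = int.Parse(addr[1]);      // e.g. 6379
```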
value) { AssertSetIdAndValue(setId, value); - return SendExpectInt(Commands.ZRem, setId.ToUtf8Bytes(), value); + return SendExpectLong(Commands.ZRem, setId.ToUtf8Bytes(), value); } - public double ZIncrBy(string setId, double incrBy, byte[] value) - { - AssertSetIdAndValue(setId, value); + public long ZRem(string setId, byte[][] values) + { + if (setId == null) + throw new ArgumentNullException("setId"); + if (values == null) + throw new ArgumentNullException("values"); + if (values.Length == 0) + throw new ArgumentException("values"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZRem, setId.ToUtf8Bytes(), values); + return SendExpectLong(cmdWithArgs); + } + + public double ZIncrBy(string setId, double incrBy, byte[] value) + { + AssertSetIdAndValue(setId, value); - return SendExpectDouble(Commands.ZIncrBy, setId.ToUtf8Bytes(), incrBy.ToFastUtf8Bytes(), value); - } + return SendExpectDouble(Commands.ZIncrBy, setId.ToUtf8Bytes(), incrBy.ToFastUtf8Bytes(), value); + } - public double ZIncrBy(string setId, long incrBy, byte[] value) - { - AssertSetIdAndValue(setId, value); + public double ZIncrBy(string setId, long incrBy, byte[] value) + { + AssertSetIdAndValue(setId, value); - return SendExpectDouble(Commands.ZIncrBy, setId.ToUtf8Bytes(), incrBy.ToUtf8Bytes(), value); - } + return SendExpectDouble(Commands.ZIncrBy, setId.ToUtf8Bytes(), incrBy.ToUtf8Bytes(), value); + } - public int ZRank(string setId, byte[] value) + public long ZRank(string setId, byte[] value) { AssertSetIdAndValue(setId, value); - return SendExpectInt(Commands.ZRank, setId.ToUtf8Bytes(), value); + return SendExpectLong(Commands.ZRank, setId.ToUtf8Bytes(), value); } - public int ZRevRank(string setId, byte[] value) + public long ZRevRank(string setId, byte[] value) { AssertSetIdAndValue(setId, value); - return SendExpectInt(Commands.ZRevRank, setId.ToUtf8Bytes(), value); + return SendExpectLong(Commands.ZRevRank, setId.ToUtf8Bytes(), value); } private byte[][] GetRange(byte[] commandBytes, string setId, int min, int max, bool withScores) + { + var args = GetRangeArgs(commandBytes, setId, min, max, withScores); + return SendExpectMultiData(args); + } + + private static byte[][] GetRangeArgs(byte[] commandBytes, string setId, int min, int max, bool withScores) { if (string.IsNullOrEmpty(setId)) throw new ArgumentNullException("setId"); var cmdWithArgs = new List - { - commandBytes, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() - }; + { + commandBytes, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() + }; if (withScores) { cmdWithArgs.Add(Commands.WithScores); } - - return SendExpectMultiData(cmdWithArgs.ToArray()); + return cmdWithArgs.ToArray(); } public byte[][] ZRange(string setId, int min, int max) @@ -1186,14 +1755,21 @@ public byte[][] ZRevRangeWithScores(string setId, int min, int max) private byte[][] GetRangeByScore(byte[] commandBytes, string setId, double min, double max, int? skip, int? take, bool withScores) + { + var args = GetRangeByScoreArgs(commandBytes, setId, min, max, skip, take, withScores); + return SendExpectMultiData(args); + } + + private static byte[][] GetRangeByScoreArgs(byte[] commandBytes, + string setId, double min, double max, int? skip, int? 
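The batch ZAdd overloads above interleave the arguments as ZADD key score member [score member ...], reading the score from each pair's Value and the member bytes from its Key. A sketch reusing the `redis` instance from earlier; the generic parameters were stripped in this rendering, so the pair type shown (KeyValuePair<byte[], double>) is inferred from the Value.ToFastUtf8Bytes()/Key usage:

```csharp
using System.Collections.Generic;

var pairs = new List<KeyValuePair<byte[], double>>
{
    new KeyValuePair<byte[], double>("alice".ToUtf8Bytes(), 1.5),
    new KeyValuePair<byte[], double>("bob".ToUtf8Bytes(),   2.0),
};
long newMembers = redis.ZAdd("leaderboard", pairs);   // 2 members added in one round trip
```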
take, bool withScores) { if (setId == null) throw new ArgumentNullException("setId"); var cmdWithArgs = new List - { - commandBytes, setId.ToUtf8Bytes(), min.ToFastUtf8Bytes(), max.ToFastUtf8Bytes() - }; + { + commandBytes, setId.ToUtf8Bytes(), min.ToFastUtf8Bytes(), max.ToFastUtf8Bytes() + }; if (skip.HasValue || take.HasValue) { @@ -1206,129 +1782,109 @@ private byte[][] GetRangeByScore(byte[] commandBytes, { cmdWithArgs.Add(Commands.WithScores); } + return cmdWithArgs.ToArray(); + } + + private byte[][] GetRangeByScore(byte[] commandBytes, + string setId, long min, long max, int? skip, int? take, bool withScores) + { + var args = GetRangeByScoreArgs(commandBytes, setId, min, max, skip, take, withScores); + return SendExpectMultiData(args); + } + + public byte[][] ZRangeByScore(string setId, double min, double max, int? skip, int? take) + { + return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, false); + } + + public byte[][] ZRangeByScore(string setId, long min, long max, int? skip, int? take) + { + return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, false); + } + + public byte[][] ZRangeByScoreWithScores(string setId, double min, double max, int? skip, int? take) + { + return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, true); + } - return SendExpectMultiData(cmdWithArgs.ToArray()); - } - - private byte[][] GetRangeByScore(byte[] commandBytes, - string setId, long min, long max, int? skip, int? take, bool withScores) - { - if (setId == null) - throw new ArgumentNullException("setId"); - - var cmdWithArgs = new List - { - commandBytes, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() - }; - - if (skip.HasValue || take.HasValue) - { - cmdWithArgs.Add(Commands.Limit); - cmdWithArgs.Add(skip.GetValueOrDefault(0).ToUtf8Bytes()); - cmdWithArgs.Add(take.GetValueOrDefault(0).ToUtf8Bytes()); - } - - if (withScores) - { - cmdWithArgs.Add(Commands.WithScores); - } - - return SendExpectMultiData(cmdWithArgs.ToArray()); - } - - public byte[][] ZRangeByScore(string setId, double min, double max, int? skip, int? take) - { - return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, false); - } - - public byte[][] ZRangeByScore(string setId, long min, long max, int? skip, int? take) - { - return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, false); - } - - public byte[][] ZRangeByScoreWithScores(string setId, double min, double max, int? skip, int? take) - { - return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, true); - } - - public byte[][] ZRangeByScoreWithScores(string setId, long min, long max, int? skip, int? take) - { - return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, true); - } - - public byte[][] ZRevRangeByScore(string setId, double min, double max, int? skip, int? take) - { - //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other - return GetRangeByScore(Commands.ZRevRangeByScore, setId, max, min, skip, take, false); - } - - public byte[][] ZRevRangeByScore(string setId, long min, long max, int? skip, int? take) - { - //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other - return GetRangeByScore(Commands.ZRevRangeByScore, setId, max, min, skip, take, false); - } - - public byte[][] ZRevRangeByScoreWithScores(string setId, double min, double max, int? skip, int? 
take) - { - //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other - return GetRangeByScore(Commands.ZRevRangeByScore, setId, max, min, skip, take, true); - } - - public byte[][] ZRevRangeByScoreWithScores(string setId, long min, long max, int? skip, int? take) - { - //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other - return GetRangeByScore(Commands.ZRevRangeByScore, setId, max, min, skip, take, true); - } - - public int ZRemRangeByRank(string setId, int min, int max) + public byte[][] ZRangeByScoreWithScores(string setId, long min, long max, int? skip, int? take) + { + return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, true); + } + + public byte[][] ZRevRangeByScore(string setId, double min, double max, int? skip, int? take) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other + return GetRangeByScore(Commands.ZRevRangeByScore, setId, max, min, skip, take, false); + } + + public byte[][] ZRevRangeByScore(string setId, long min, long max, int? skip, int? take) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other + return GetRangeByScore(Commands.ZRevRangeByScore, setId, max, min, skip, take, false); + } + + public byte[][] ZRevRangeByScoreWithScores(string setId, double min, double max, int? skip, int? take) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other + return GetRangeByScore(Commands.ZRevRangeByScore, setId, max, min, skip, take, true); + } + + public byte[][] ZRevRangeByScoreWithScores(string setId, long min, long max, int? skip, int? take) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other + return GetRangeByScore(Commands.ZRevRangeByScore, setId, max, min, skip, take, true); + } + + public long ZRemRangeByRank(string setId, int min, int max) { if (setId == null) throw new ArgumentNullException("setId"); - return SendExpectInt(Commands.ZRemRangeByRank, setId.ToUtf8Bytes(), + return SendExpectLong(Commands.ZRemRangeByRank, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); } - public int ZRemRangeByScore(string setId, double fromScore, double toScore) - { - if (setId == null) - throw new ArgumentNullException("setId"); + public long ZRemRangeByScore(string setId, double fromScore, double toScore) + { + if (setId == null) + throw new ArgumentNullException("setId"); - return SendExpectInt(Commands.ZRemRangeByScore, setId.ToUtf8Bytes(), - fromScore.ToFastUtf8Bytes(), toScore.ToFastUtf8Bytes()); - } + return SendExpectLong(Commands.ZRemRangeByScore, setId.ToUtf8Bytes(), + fromScore.ToFastUtf8Bytes(), toScore.ToFastUtf8Bytes()); + } - public int ZRemRangeByScore(string setId, long fromScore, long toScore) - { - if (setId == null) - throw new ArgumentNullException("setId"); + public long ZRemRangeByScore(string setId, long fromScore, long toScore) + { + if (setId == null) + throw new ArgumentNullException("setId"); - return SendExpectInt(Commands.ZRemRangeByScore, setId.ToUtf8Bytes(), - fromScore.ToUtf8Bytes(), toScore.ToUtf8Bytes()); - } + return SendExpectLong(Commands.ZRemRangeByScore, setId.ToUtf8Bytes(), + fromScore.ToUtf8Bytes(), toScore.ToUtf8Bytes()); + } - public int ZCard(string setId) + public long ZCard(string setId) { if (setId == null) throw new ArgumentNullException("setId"); - return SendExpectInt(Commands.ZCard, setId.ToUtf8Bytes()); + return SendExpectLong(Commands.ZCard, setId.ToUtf8Bytes()); } - public int ZCount(string setId, double 
min, double max) + public long ZCount(string setId, double min, double max) { if (setId == null) throw new ArgumentNullException("setId"); - return SendExpectInt(Commands.ZCount, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + return SendExpectLong(Commands.ZCount, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); } - public int ZCount(string setId, long min, long max) + public long ZCount(string setId, long min, long max) { if (setId == null) throw new ArgumentNullException("setId"); - return SendExpectInt(Commands.ZCount, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + return SendExpectLong(Commands.ZCount, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); } public double ZScore(string setId, byte[] value) @@ -1339,24 +1895,85 @@ public double ZScore(string setId, byte[] value) return SendExpectDouble(Commands.ZScore, setId.ToUtf8Bytes(), value); } - public int ZUnionStore(string intoSetId, params string[] setIds) + public long ZUnionStore(string intoSetId, params string[] setIds) { var setIdsList = new List(setIds); setIdsList.Insert(0, setIds.Length.ToString()); setIdsList.Insert(0, intoSetId); var cmdWithArgs = MergeCommandWithArgs(Commands.ZUnionStore, setIdsList.ToArray()); - return SendExpectInt(cmdWithArgs); + return SendExpectLong(cmdWithArgs); + } + + public long ZUnionStore(string intoSetId, string[] setIds, string[] args) + { + var totalArgs = new List(setIds); + totalArgs.Insert(0, setIds.Length.ToString()); + totalArgs.Insert(0, intoSetId); + totalArgs.AddRange(args); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZUnionStore, totalArgs.ToArray()); + return SendExpectLong(cmdWithArgs); } - public int ZInterStore(string intoSetId, params string[] setIds) + public long ZInterStore(string intoSetId, params string[] setIds) { var setIdsList = new List(setIds); setIdsList.Insert(0, setIds.Length.ToString()); setIdsList.Insert(0, intoSetId); var cmdWithArgs = MergeCommandWithArgs(Commands.ZInterStore, setIdsList.ToArray()); - return SendExpectInt(cmdWithArgs); + return SendExpectLong(cmdWithArgs); + } + + public long ZInterStore(string intoSetId, string[] setIds, string[] args) + { + var totalArgs = new List(setIds); + totalArgs.Insert(0, setIds.Length.ToString()); + totalArgs.Insert(0, intoSetId); + totalArgs.AddRange(args); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZInterStore, totalArgs.ToArray()); + return SendExpectLong(cmdWithArgs); + } + + static byte[][] GetZRangeByLexArgs(string setId, string min, string max, int? skip, int? take) + { + if (setId == null) + throw new ArgumentNullException("setId"); + + var cmdWithArgs = new List + { + Commands.ZRangeByLex, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() + }; + + if (skip.HasValue || take.HasValue) + { + cmdWithArgs.Add(Commands.Limit); + cmdWithArgs.Add(skip.GetValueOrDefault(0).ToUtf8Bytes()); + cmdWithArgs.Add(take.GetValueOrDefault(0).ToUtf8Bytes()); + } + return cmdWithArgs.ToArray(); + } + public byte[][] ZRangeByLex(string setId, string min, string max, int? skip = null, int? 
take = null) + => SendExpectMultiData(GetZRangeByLexArgs(setId, min, max, skip, take)); + + public long ZLexCount(string setId, string min, string max) + { + if (setId == null) + throw new ArgumentNullException("setId"); + + return SendExpectLong( + Commands.ZLexCount, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + } + + public long ZRemRangeByLex(string setId, string min, string max) + { + if (setId == null) + throw new ArgumentNullException("setId"); + + return SendExpectLong( + Commands.ZRemRangeByLex, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); } #endregion @@ -1364,104 +1981,131 @@ public int ZInterStore(string intoSetId, params string[] setIds) #region Hash Operations - private static void AssertHashIdAndKey(string hashId, byte[] key) + private static void AssertHashIdAndKey(object hashId, byte[] key) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); } - public int HSet(string hashId, byte[] key, byte[] value) + public long HSet(string hashId, byte[] key, byte[] value) + { + return HSet(hashId.ToUtf8Bytes(), key, value); + } + + public long HSet(byte[] hashId, byte[] key, byte[] value) { AssertHashIdAndKey(hashId, key); - return SendExpectInt(Commands.HSet, hashId.ToUtf8Bytes(), key, value); + return SendExpectLong(Commands.HSet, hashId, key, value); } - public int HSetNX(string hashId, byte[] key, byte[] value) + public long HSetNX(string hashId, byte[] key, byte[] value) { AssertHashIdAndKey(hashId, key); - return SendExpectInt(Commands.HSetNx, hashId.ToUtf8Bytes(), key, value); + return SendExpectLong(Commands.HSetNx, hashId.ToUtf8Bytes(), key, value); } public void HMSet(string hashId, byte[][] keys, byte[][] values) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); var cmdArgs = MergeCommandWithKeysAndValues(Commands.HMSet, hashId.ToUtf8Bytes(), keys, values); SendExpectSuccess(cmdArgs); } - public int HIncrby(string hashId, byte[] key, int incrementBy) + public long HIncrby(string hashId, byte[] key, int incrementBy) { AssertHashIdAndKey(hashId, key); - return SendExpectInt(Commands.HIncrBy, hashId.ToUtf8Bytes(), key, incrementBy.ToString().ToUtf8Bytes()); + return SendExpectLong(Commands.HIncrBy, hashId.ToUtf8Bytes(), key, incrementBy.ToString().ToUtf8Bytes()); } - public int HIncrby(string hashId, byte[] key, long incrementBy) + public long HIncrby(string hashId, byte[] key, long incrementBy) { AssertHashIdAndKey(hashId, key); - return SendExpectInt(Commands.HIncrBy, hashId.ToUtf8Bytes(), key, incrementBy.ToString().ToUtf8Bytes()); + return SendExpectLong(Commands.HIncrBy, hashId.ToUtf8Bytes(), key, incrementBy.ToString().ToUtf8Bytes()); } - public double HIncrbyFloat(string hashId, byte[] key, double incrementBy) - { - AssertHashIdAndKey(hashId, key); + public double HIncrbyFloat(string hashId, byte[] key, double incrementBy) + { + AssertHashIdAndKey(hashId, key); - return SendExpectDouble(Commands.HIncrBy, hashId.ToUtf8Bytes(), key, incrementBy.ToString(CultureInfo.InvariantCulture).ToUtf8Bytes()); - } + return SendExpectDouble(Commands.HIncrByFloat, hashId.ToUtf8Bytes(), key, incrementBy.ToString(CultureInfo.InvariantCulture).ToUtf8Bytes()); + } - public byte[] HGet(string hashId, byte[] key) + public byte[] HGet(string hashId, byte[] key) + { + return HGet(hashId.ToUtf8Bytes(), key); + } + + public 
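ZRANGEBYLEX takes its bounds exactly as Redis expects them: `[` for inclusive, `(` for exclusive, and `-`/`+` for the unbounded ends, with the optional skip/take pair becoming a LIMIT clause; the new ZUnionStore/ZInterStore overloads simply append any extra tokens (e.g. WEIGHTS, AGGREGATE) verbatim after the key list. A sketch, `redis` as before:

```csharp
redis.ZAdd("dict", 0, "apple".ToUtf8Bytes());
redis.ZAdd("dict", 0, "banana".ToUtf8Bytes());
redis.ZAdd("dict", 0, "cherry".ToUtf8Bytes());

byte[][] range = redis.ZRangeByLex("dict", "[a", "(c");      // apple, banana
long total     = redis.ZLexCount("dict", "-", "+");          // 3

// Extra args are forwarded as-is after the numkeys/key list:
long stored = redis.ZUnionStore("dest", new[] { "zset1", "zset2" }, new[] { "WEIGHTS", "2", "3" });
```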
byte[] HGet(byte[] hashId, byte[] key) { AssertHashIdAndKey(hashId, key); - return SendExpectData(Commands.HGet, hashId.ToUtf8Bytes(), key); + return SendExpectData(Commands.HGet, hashId, key); } public byte[][] HMGet(string hashId, params byte[][] keys) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); if (keys.Length == 0) - throw new ArgumentNullException("keys"); + throw new ArgumentNullException(nameof(keys)); var cmdArgs = MergeCommandWithArgs(Commands.HMGet, hashId.ToUtf8Bytes(), keys); return SendExpectMultiData(cmdArgs); } - public int HDel(string hashId, byte[] key) + public long HDel(string hashId, byte[] key) + { + return HDel(hashId.ToUtf8Bytes(), key); + } + + public long HDel(byte[] hashId, byte[] key) { AssertHashIdAndKey(hashId, key); - return SendExpectInt(Commands.HDel, hashId.ToUtf8Bytes(), key); + return SendExpectLong(Commands.HDel, hashId, key); } - public int HExists(string hashId, byte[] key) + public long HDel(string hashId, byte[][] keys) + { + if (hashId == null) + throw new ArgumentNullException(nameof(hashId)); + if (keys == null) + throw new ArgumentNullException(nameof(keys)); + if (keys.Length == 0) + throw new ArgumentException(nameof(keys)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.HDel, hashId.ToUtf8Bytes(), keys); + return SendExpectLong(cmdWithArgs); + } + public long HExists(string hashId, byte[] key) { AssertHashIdAndKey(hashId, key); - return SendExpectInt(Commands.HExists, hashId.ToUtf8Bytes(), key); + return SendExpectLong(Commands.HExists, hashId.ToUtf8Bytes(), key); } - public int HLen(string hashId) + public long HLen(string hashId) { if (string.IsNullOrEmpty(hashId)) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); - return SendExpectInt(Commands.HLen, hashId.ToUtf8Bytes()); + return SendExpectLong(Commands.HLen, hashId.ToUtf8Bytes()); } public byte[][] HKeys(string hashId) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); return SendExpectMultiData(Commands.HKeys, hashId.ToUtf8Bytes()); } @@ -1469,7 +2113,7 @@ public byte[][] HKeys(string hashId) public byte[][] HVals(string hashId) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); return SendExpectMultiData(Commands.HVals, hashId.ToUtf8Bytes()); } @@ -1477,14 +2121,14 @@ public byte[][] HVals(string hashId) public byte[][] HGetAll(string hashId) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); return SendExpectMultiData(Commands.HGetAll, hashId.ToUtf8Bytes()); } - public int Publish(string toChannel, byte[] message) + public long Publish(string toChannel, byte[] message) { - return SendExpectInt(Commands.Publish, toChannel.ToUtf8Bytes(), message); + return SendExpectLong(Commands.Publish, toChannel.ToUtf8Bytes(), message); } public byte[][] ReceiveMessages() @@ -1492,10 +2136,15 @@ public byte[][] ReceiveMessages() return ReadMultiData(); } - public byte[][] Subscribe(params string[] toChannels) + public virtual IRedisSubscription CreateSubscription() + { + return new RedisSubscription(this); + } + + public byte[][] Subscribe(params string[] toChannels) { if (toChannels.Length == 0) - throw new ArgumentNullException("toChannels"); + throw new ArgumentNullException(nameof(toChannels)); var cmdWithArgs = MergeCommandWithArgs(Commands.Subscribe, toChannels); return 
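Note the quiet bug fix above: HIncrbyFloat now sends HINCRBYFLOAT (it previously reused the HINCRBY command bytes), and the increment is formatted with the invariant culture so the decimal separator is always '.'. A sketch of the hash operations, `redis` as before:

```csharp
redis.HSet("account:1", "balance".ToUtf8Bytes(), "10.0".ToUtf8Bytes());
double balance = redis.HIncrbyFloat("account:1", "balance".ToUtf8Bytes(), 2.5);  // 12.5
long visits    = redis.HIncrby("account:1", "visits".ToUtf8Bytes(), 1);          // 1
byte[][] all   = redis.HGetAll("account:1");   // alternating field/value entries
```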
SendExpectMultiData(cmdWithArgs); @@ -1510,7 +2159,7 @@ public byte[][] UnSubscribe(params string[] fromChannels) public byte[][] PSubscribe(params string[] toChannelsMatchingPatterns) { if (toChannelsMatchingPatterns.Length == 0) - throw new ArgumentNullException("toChannelsMatchingPatterns"); + throw new ArgumentNullException(nameof(toChannelsMatchingPatterns)); var cmdWithArgs = MergeCommandWithArgs(Commands.PSubscribe, toChannelsMatchingPatterns); return SendExpectMultiData(cmdWithArgs); @@ -1530,9 +2179,264 @@ public RedisPipelineCommand CreatePipelineCommand() #endregion + #region GEO Operations + + public long GeoAdd(string key, double longitude, double latitude, string member) + { + if (key == null) + throw new ArgumentNullException(nameof(key)); + if (member == null) + throw new ArgumentNullException(nameof(member)); + + return SendExpectLong(Commands.GeoAdd, key.ToUtf8Bytes(), longitude.ToUtf8Bytes(), latitude.ToUtf8Bytes(), member.ToUtf8Bytes()); + } + + public long GeoAdd(string key, params RedisGeo[] geoPoints) + { + var cmdWithArgs = GeoAddPrepareArgs(key, geoPoints); + return SendExpectLong(cmdWithArgs); + } + + private static byte[][] GeoAddPrepareArgs(string key, RedisGeo[] geoPoints) + { + if (key == null) + throw new ArgumentNullException(nameof(key)); + + var members = new byte[geoPoints.Length * 3][]; + for (var i = 0; i < geoPoints.Length; i++) + { + var geoPoint = geoPoints[i]; + members[i * 3 + 0] = geoPoint.Longitude.ToUtf8Bytes(); + members[i * 3 + 1] = geoPoint.Latitude.ToUtf8Bytes(); + members[i * 3 + 2] = geoPoint.Member.ToUtf8Bytes(); + } + + return MergeCommandWithArgs(Commands.GeoAdd, key.ToUtf8Bytes(), members); + } + + public double GeoDist(string key, string fromMember, string toMember, string unit = null) + { + if (key == null) + throw new ArgumentNullException(nameof(key)); + + return unit == null + ? SendExpectDouble(Commands.GeoDist, key.ToUtf8Bytes(), fromMember.ToUtf8Bytes(), toMember.ToUtf8Bytes()) + : SendExpectDouble(Commands.GeoDist, key.ToUtf8Bytes(), fromMember.ToUtf8Bytes(), toMember.ToUtf8Bytes(), unit.ToUtf8Bytes()); + } + + public string[] GeoHash(string key, params string[] members) + { + if (key == null) + throw new ArgumentNullException(nameof(key)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.GeoHash, key.ToUtf8Bytes(), members.Map(x => x.ToUtf8Bytes()).ToArray()); + return SendExpectMultiData(cmdWithArgs).ToStringArray(); + } + + public List GeoPos(string key, params string[] members) + { + if (key == null) + throw new ArgumentNullException(nameof(key)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.GeoPos, key.ToUtf8Bytes(), members.Map(x => x.ToUtf8Bytes()).ToArray()); + var data = SendExpectComplexResponse(cmdWithArgs); + return GeoPosParseResult(members, data); + } + private static List GeoPosParseResult(string[] members, RedisData data) + { + var to = new List(); + + for (var i = 0; i < members.Length; i++) + { + if (data.Children.Count <= i) + break; + + var entry = data.Children[i]; + + var children = entry.Children; + if (children.Count == 0) + continue; + + to.Add(new RedisGeo + { + Longitude = children[0].ToDouble(), + Latitude = children[1].ToDouble(), + Member = members[i], + }); + } + + return to; + } + + public List GeoRadius(string key, double longitude, double latitude, double radius, string unit, + bool withCoords = false, bool withDist = false, bool withHash = false, int? count = null, bool? 
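CreateSubscription (added above) returns an IRedisSubscription built on the raw Subscribe/ReceiveMessages loop. A sketch of the usual pattern; OnMessage/SubscribeToChannels are the existing subscription API, the channel name is illustrative, and because SubscribeToChannels blocks the calling connection the publish has to come from a different client:

```csharp
using (var subscriber = new RedisNativeClient("localhost", 6379))
using (var subscription = subscriber.CreateSubscription())
{
    subscription.OnMessage = (channel, msg) => Console.WriteLine($"{channel}: {msg}");
    subscription.SubscribeToChannels("orders");     // blocks until unsubscribed
}

// ...from another connection/thread:
redis.Publish("orders", "order#42 created".ToUtf8Bytes());
```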
asc = null) + { + var cmdWithArgs = GeoRadiusPrepareArgs(key, longitude, latitude, radius, unit, + withCoords, withDist, withHash, count, asc); + + var to = new List(); + + if (!(withCoords || withDist || withHash)) + { + var members = SendExpectMultiData(cmdWithArgs).ToStringArray(); + foreach (var member in members) + { + to.Add(new RedisGeoResult { Member = member }); + } + } + else + { + var data = SendExpectComplexResponse(cmdWithArgs); + GetRadiusParseResult(unit, withCoords, withDist, withHash, to, data); + } + + return to; + } + + private static void GetRadiusParseResult(string unit, bool withCoords, bool withDist, bool withHash, List to, RedisData data) + { + foreach (var child in data.Children) + { + var i = 0; + var result = new RedisGeoResult { Unit = unit, Member = child.Children[i++].Data.FromUtf8Bytes() }; + + if (withDist) result.Distance = child.Children[i++].ToDouble(); + + if (withHash) result.Hash = child.Children[i++].ToInt64(); + + if (withCoords) + { + var children = child.Children[i].Children; + result.Longitude = children[0].ToDouble(); + result.Latitude = children[1].ToDouble(); + } + + to.Add(result); + } + } + + private static byte[][] GeoRadiusPrepareArgs(string key, double longitude, double latitude, double radius, string unit, + bool withCoords, bool withDist, bool withHash, int? count, bool? asc) + { + if (key == null) + throw new ArgumentNullException(nameof(key)); + + var args = new List + { + longitude.ToUtf8Bytes(), + latitude.ToUtf8Bytes(), + radius.ToUtf8Bytes(), + Commands.GetUnit(unit), + }; + + if (withCoords) + args.Add(Commands.WithCoord); + if (withDist) + args.Add(Commands.WithDist); + if (withHash) + args.Add(Commands.WithHash); + + if (count != null) + { + args.Add(Commands.Count); + args.Add(count.Value.ToUtf8Bytes()); + } + + if (asc == true) + args.Add(Commands.Asc); + else if (asc == false) + args.Add(Commands.Desc); + + return MergeCommandWithArgs(Commands.GeoRadius, key.ToUtf8Bytes(), args.ToArray()); + } + + public List GeoRadiusByMember(string key, string member, double radius, string unit, + bool withCoords = false, bool withDist = false, bool withHash = false, int? count = null, bool? asc = null) + { + var cmdWithArgs = GeoRadiusByMemberPrepareArgs(key, member, radius, unit, withCoords, withDist, withHash, count, asc); + + var to = new List(); + + if (!(withCoords || withDist || withHash)) + { + var members = SendExpectMultiData(cmdWithArgs).ToStringArray(); + foreach (var x in members) + { + to.Add(new RedisGeoResult { Member = x }); + } + } + else + { + var data = SendExpectComplexResponse(cmdWithArgs); + GeoRadiusByMemberParseResult(unit, withCoords, withDist, withHash, to, data); + } + + return to; + } + + private static void GeoRadiusByMemberParseResult(string unit, bool withCoords, bool withDist, bool withHash, List to, RedisData data) + { + foreach (var child in data.Children) + { + var i = 0; + var result = new RedisGeoResult { Unit = unit, Member = child.Children[i++].Data.FromUtf8Bytes() }; + + if (withDist) result.Distance = child.Children[i++].ToDouble(); + + if (withHash) result.Hash = child.Children[i++].ToInt64(); + + if (withCoords) + { + var children = child.Children[i].Children; + result.Longitude = children[0].ToDouble(); + result.Latitude = children[1].ToDouble(); + } + + to.Add(result); + } + } + + static byte[][] GeoRadiusByMemberPrepareArgs(string key, string member, double radius, string unit, + bool withCoords, bool withDist, bool withHash, int? count, bool? 
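GeoRadius/GeoRadiusByMember above only switch to the complex-response reader when WITHCOORD/WITHDIST/WITHHASH is requested; otherwise the reply is a plain multi-bulk of member names. A sketch using the coordinates from the Redis GEOADD documentation (the unit string is one of m, km, mi, ft), `redis` as before:

```csharp
redis.GeoAdd("stations", 13.361389, 38.115556, "Palermo");
redis.GeoAdd("stations", 15.087269, 37.502669, "Catania");

double km = redis.GeoDist("stations", "Palermo", "Catania", unit: "km");   // ~166.27

var nearby = redis.GeoRadius("stations", 15, 37, 200, "km", withCoords: true, withDist: true);
foreach (var r in nearby)
    Console.WriteLine($"{r.Member}: {r.Distance} {r.Unit} @ ({r.Longitude}, {r.Latitude})");
```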
asc) + { + if (key == null) + throw new ArgumentNullException(nameof(key)); + + var args = new List + { + member.ToUtf8Bytes(), + radius.ToUtf8Bytes(), + Commands.GetUnit(unit), + }; + + if (withCoords) + args.Add(Commands.WithCoord); + if (withDist) + args.Add(Commands.WithDist); + if (withHash) + args.Add(Commands.WithHash); + + if (count != null) + { + args.Add(Commands.Count); + args.Add(count.Value.ToUtf8Bytes()); + } + + if (asc == true) + args.Add(Commands.Asc); + else if (asc == false) + args.Add(Commands.Desc); + + return MergeCommandWithArgs(Commands.GeoRadiusByMember, key.ToUtf8Bytes(), args.ToArray()); + } + + #endregion + internal bool IsDisposed { get; set; } - public void Dispose() + public bool IsManagedClient => ClientManager != null; + + public virtual void Dispose() { Dispose(true); GC.SuppressFinalize(this); @@ -1560,7 +2464,7 @@ protected virtual void Dispose(bool disposing) internal void DisposeConnection() { - if (IsDisposed) return; + if (IsDisposed) return; IsDisposed = true; if (socket == null) return; @@ -1584,17 +2488,22 @@ private void SafeConnectionClose() try { // workaround for a .net bug: http://support.microsoft.com/kb/821625 - if (Bstream != null) - Bstream.Close(); + bufferedReader?.Close(); + } + catch { } + try + { + sslStream?.Close(); } catch { } try { - if (socket != null) - socket.Close(); + socket?.Close(); } catch { } - Bstream = null; + + bufferedReader = null; + sslStream = null; socket = null; } } diff --git a/src/ServiceStack.Redis/RedisNativeClient_Utils.Async.cs b/src/ServiceStack.Redis/RedisNativeClient_Utils.Async.cs new file mode 100644 index 00000000..966dbbf1 --- /dev/null +++ b/src/ServiceStack.Redis/RedisNativeClient_Utils.Async.cs @@ -0,0 +1,552 @@ +using ServiceStack.Redis.Internal; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Text; +using ServiceStack.Text.Pools; +using System; +using System.Collections.Generic; +using System.IO; +using System.Net.Sockets; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisNativeClient + { + private async ValueTask SendExpectMultiDataAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return (await SendReceiveAsync(cmdWithBinaryArgs, ReadMultiDataAsync, token, + PipelineAsync != null ? PipelineAsync.CompleteMultiBytesQueuedCommandAsync : (Action>>)null).ConfigureAwait(false)) + ?? TypeConstants.EmptyByteArrayArray; + } + + protected ValueTask SendWithoutReadAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + => SendReceiveAsync(cmdWithBinaryArgs, null, token, null, sendWithoutRead: true).Await(); + + private ValueTask SendExpectLongAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ReadLongAsync, token, + PipelineAsync != null ? PipelineAsync.CompleteLongQueuedCommandAsync : (Action>>)null); + } + + private ValueTask SendExpectDoubleAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ReadDoubleAsync, token, + PipelineAsync != null ? 
PipelineAsync.CompleteDoubleQueuedCommandAsync : (Action>>)null); + } + protected ValueTask SendExpectStringAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + => SendExpectDataAsync(token, cmdWithBinaryArgs).FromUtf8BytesAsync(); + + private ValueTask SendExpectSuccessAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + //Turn Action into Func Hack + Action>> completePipelineFn = null; + if (PipelineAsync != null) completePipelineFn = f => { PipelineAsync.CompleteVoidQueuedCommandAsync(ct => f(ct).Await()); }; + + return SendReceiveAsync(cmdWithBinaryArgs, ExpectSuccessFnAsync, token, completePipelineFn).Await(); + } + + private ValueTask SendExpectDataAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ReadDataAsync, token, PipelineAsync != null ? PipelineAsync.CompleteBytesQueuedCommandAsync : (Action>>)null); + } + + private ValueTask SendExpectCodeAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ExpectCodeAsync, token, PipelineAsync != null ? PipelineAsync.CompleteStringQueuedCommandAsync : (Action>>)null); + } + + private ValueTask SendExpectScanResultAsync(CancellationToken token, byte[] cmd, params byte[][] args) + { + var cmdWithArgs = MergeCommandWithArgs(cmd, args); + return SendExpectDeeplyNestedMultiDataAsync(token, cmdWithArgs).Await(multiData => ParseScanResult(multiData)); + } + + private ValueTask SendExpectDeeplyNestedMultiDataAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + => SendReceiveAsync(cmdWithBinaryArgs, ReadDeeplyNestedMultiDataAsync, token); + + private ValueTask ReadDeeplyNestedMultiDataAsync(CancellationToken token) + => ReadDeeplyNestedMultiDataItemAsync(token).Await(result => (object[])result); + + private async ValueTask ReadDeeplyNestedMultiDataItemAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + if (log.IsDebugEnabled) + Log("R: {0}", s); + + switch (c) + { + case '$': + return await ParseSingleLineAsync(string.Concat(char.ToString((char)c), s), token).ConfigureAwait(false); + + case '-': + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + case '*': + if (int.TryParse(s, out var count)) + { + var array = new object[count]; + for (int i = 0; i < count; i++) + { + array[i] = await ReadDeeplyNestedMultiDataItemAsync(token).ConfigureAwait(false); + } + + return array; + } + break; + + default: + return s; + } + + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix + } + + protected ValueTask SendExpectComplexResponseAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ReadComplexResponseAsync, token, + PipelineAsync != null ? 
PipelineAsync.CompleteRedisDataQueuedCommandAsync : (Action>>)null); + } + + private async ValueTask ReadComplexResponseAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + if (log.IsDebugEnabled) + Log("R: {0}", s); + + switch (c) + { + case '$': + return new RedisData + { + Data = await ParseSingleLineAsync(string.Concat(char.ToString((char)c), s), token).ConfigureAwait(false) + }; + + case '-': + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + case '*': + if (int.TryParse(s, out var count)) + { + var ret = new RedisData { Children = new List() }; + for (var i = 0; i < count; i++) + { + ret.Children.Add(await ReadComplexResponseAsync(token).ConfigureAwait(false)); + } + + return ret; + } + break; + + default: + return new RedisData { Data = s.ToUtf8Bytes() }; + } + + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix + } + + private async ValueTask SendReceiveAsync(byte[][] cmdWithBinaryArgs, + Func> fn, + CancellationToken token, + Action>> completePipelineFn = null, + bool sendWithoutRead = false) + { + //if (TrackThread != null) + //{ + // if (TrackThread.Value.ThreadId != Thread.CurrentThread.ManagedThreadId) + // throw new InvalidAccessException(TrackThread.Value.ThreadId, TrackThread.Value.StackTrace); + //} + + var i = 0; + var didWriteToBuffer = false; + Exception originalEx = null; + + var firstAttempt = DateTime.UtcNow; + + while (true) + { + // this is deliberately *before* the try, so we never retry + // if we've been cancelled + token.ThrowIfCancellationRequested(); + try + { + if (TryConnectIfNeeded()) // TODO: asyncify + didWriteToBuffer = false; + + if (socket == null) + throw new RedisRetryableException("Socket is not connected"); + + if (!didWriteToBuffer) //only write to buffer once + { + WriteCommandToSendBuffer(cmdWithBinaryArgs); + didWriteToBuffer = true; + } + + if (PipelineAsync == null) //pipeline will handle flush if in pipeline + { + await FlushSendBufferAsync(token).ConfigureAwait(false); + } + else if (!sendWithoutRead) + { + if (completePipelineFn == null) + throw new NotSupportedException("Pipeline is not supported."); + + completePipelineFn(fn); + return default; + } + + var result = default(T); + if (fn != null) + result = await fn(token).ConfigureAwait(false); + + if (Pipeline == null) + ResetSendBuffer(); + + if (i > 0) + Interlocked.Increment(ref RedisState.TotalRetrySuccess); + + Interlocked.Increment(ref RedisState.TotalCommandsSent); + + return result; + } + catch (Exception outerEx) + { + if (log.IsDebugEnabled) + logDebug("SendReceive Exception: " + outerEx.Message); + + var retryableEx = outerEx as RedisRetryableException; + if (retryableEx == null && outerEx is RedisException + || outerEx is LicenseException) + { + ResetSendBuffer(); + throw; + } + + var ex = retryableEx ?? GetRetryableException(outerEx); + if (ex == null) + throw CreateConnectionError(originalEx ?? 
outerEx); + + if (originalEx == null) + originalEx = ex; + + var retry = DateTime.UtcNow - firstAttempt < retryTimeout; + if (!retry) + { + if (Pipeline == null) + ResetSendBuffer(); + + Interlocked.Increment(ref RedisState.TotalRetryTimedout); + throw CreateRetryTimeoutException(retryTimeout, originalEx); + } + + Interlocked.Increment(ref RedisState.TotalRetryCount); + await Task.Delay(GetBackOffMultiplier(++i), token).ConfigureAwait(false); + } + } + } + + internal ValueTask FlushSendBufferAsync(CancellationToken token) + { + if (currentBufferIndex > 0) + PushCurrentBuffer(); + + if (cmdBuffer.Count > 0) + { + OnBeforeFlush?.Invoke(); + + if (!Env.IsMono && sslStream == null) + { + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + { + var sb = StringBuilderCache.Allocate(); + foreach (var cmd in cmdBuffer) + { + if (sb.Length > 50) + break; + + sb.Append(Encoding.UTF8.GetString(cmd.Array, cmd.Offset, cmd.Count)); + } + logDebug("socket.Send: " + StringBuilderCache.ReturnAndFree(sb.Replace("\r\n", " ")).SafeSubstring(0, 50)); + } + + return new ValueTask(socket.SendAsync(cmdBuffer, SocketFlags.None)); + } + else + { + //Sending IList Throws 'Message to Large' SocketException in Mono + if (sslStream == null) + { + foreach (var segment in cmdBuffer) + { // TODO: what is modern Mono behavior here? + socket.Send(segment.Array, segment.Offset, segment.Count, SocketFlags.None); + } + } + else + { + return WriteAsync(sslStream, cmdBuffer, token); + } + } + } + + return default; + + static async ValueTask WriteAsync(Stream destination, List> buffer, CancellationToken token) + { + foreach (var segment in buffer) + { +#if ASYNC_MEMORY + await destination.WriteAsync(new ReadOnlyMemory(segment.Array, segment.Offset, segment.Count), token).ConfigureAwait(false); +#else + await destination.WriteAsync(segment.Array, segment.Offset, segment.Count, token).ConfigureAwait(false); +#endif + } + } + } + + + private ValueTask SafeReadByteAsync(in CancellationToken token, [CallerMemberName]string name = null) + { + AssertNotDisposed(); + + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + logDebug(name + "()"); + + return bufferedReader.ReadByteAsync(token); + } + + private async ValueTask ReadLineAsync(CancellationToken token) + { + AssertNotDisposed(); + + var sb = StringBuilderCache.Allocate(); + + int c; + while ((c = await bufferedReader.ReadByteAsync(token).ConfigureAwait(false)) != -1) + { + if (c == '\r') + continue; + if (c == '\n') + break; + sb.Append((char)c); + } + return StringBuilderCache.ReturnAndFree(sb); + } + + private async ValueTask ParseSingleLineAsync(string r, CancellationToken token) + { + if (log.IsDebugEnabled) + Log("R: {0}", r); + if (r.Length == 0) + throw CreateResponseError("Zero length response"); + + char c = r[0]; + if (c == '-') + throw CreateResponseError(r.StartsWith("-ERR") ? 
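Worth spelling out the retry policy implemented by SendReceiveAsync above: only RedisRetryableException (or exceptions GetRetryableException recognises as retryable) are retried; RedisException and LicenseException are rethrown immediately after the send buffer is reset. Retries continue until retryTimeout has elapsed since the first attempt, with a growing Task.Delay taken from GetBackOffMultiplier between attempts, and the RedisState counters (TotalRetryCount, TotalRetrySuccess, TotalRetryTimedout) record the outcome.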
r.Substring(5) : r.Substring(1)); + + if (c == '$') + { + if (r == "$-1") + return null; + + if (int.TryParse(r.Substring(1), out var count)) + { + var retbuf = new byte[count]; + + var offset = 0; + while (count > 0) + { + var readCount = await bufferedReader.ReadAsync(retbuf, offset, count, token).ConfigureAwait(false); + if (readCount <= 0) + throw CreateResponseError("Unexpected end of Stream"); + + offset += readCount; + count -= readCount; + } + + if (await bufferedReader.ReadByteAsync(token).ConfigureAwait(false) != '\r' + || await bufferedReader.ReadByteAsync(token).ConfigureAwait(false) != '\n') + throw CreateResponseError("Invalid termination"); + + return retbuf; + } + throw CreateResponseError("Invalid length"); + } + + if (c == ':' || c == '+') + { + //match the return value + return r.Substring(1).ToUtf8Bytes(); + } + throw CreateResponseError("Unexpected reply: " + r); + } + + private ValueTask ReadDataAsync(CancellationToken token) + { + var pending = ReadLineAsync(token); + return pending.IsCompletedSuccessfully + ? ParseSingleLineAsync(pending.Result, token) + : Awaited(this, pending, token); + + static async ValueTask Awaited(RedisNativeClient @this, ValueTask pending, CancellationToken token) + { + var r = await pending.ConfigureAwait(false); + return await @this.ParseSingleLineAsync(r, token).ConfigureAwait(false); + } + } + + private async ValueTask ExpectCodeAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + + if (log.IsDebugEnabled) + Log((char)c + s); + + if (c == '-') + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + return s; + } + + private async ValueTask ReadMultiDataAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + if (log.IsDebugEnabled) + Log("R: {0}", s); + + switch (c) + { + // Some commands like BRPOPLPUSH may return Bulk Reply instead of Multi-bulk + case '$': + var t = new byte[2][]; + t[1] = await ParseSingleLineAsync(string.Concat(char.ToString((char)c), s), token).ConfigureAwait(false); + return t; + + case '-': + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + case '*': + if (int.TryParse(s, out var count)) + { + if (count == -1) + { + //redis is in an invalid state + return TypeConstants.EmptyByteArrayArray; + } + + var result = new byte[count][]; + + for (int i = 0; i < count; i++) + result[i] = await ReadDataAsync(token).ConfigureAwait(false); + + return result; + } + break; + } + + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix + } + + internal async ValueTask ReadLongAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + return ParseLong(c, await ReadLineAsync(token).ConfigureAwait(false)); + } + + private ValueTask ReadDoubleAsync(CancellationToken token) + => ReadDataAsync(token).Await(bytes => bytes == null ? 
double.NaN : ParseDouble(bytes)); + + internal ValueTask ExpectOkAsync(CancellationToken token) + => ExpectWordAsync(OK, token); + + internal ValueTask ExpectQueuedAsync(CancellationToken token) + => ExpectWordAsync(QUEUED, token); + + internal ValueTask ExpectSuccessFnAsync(CancellationToken token) + { + var pending = ExpectSuccessAsync(token); + return pending.IsCompletedSuccessfully ? default : Awaited(pending); + + static async ValueTask Awaited(ValueTask pending) + { + await pending.ConfigureAwait(false); + return 0; + } + } + + internal async ValueTask ExpectSuccessAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + + if (log.IsDebugEnabled) + Log((char)c + s); + + if (c == '-') + throw CreateResponseError(s.StartsWith("ERR") && s.Length >= 4 ? s.Substring(4) : s); + } + + + private async ValueTask ExpectWordAsync(string word, CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + + if (log.IsDebugEnabled) + Log((char)c + s); + + if (c == '-') + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + if (s != word) + throw CreateResponseError($"Expected '{word}' got '{s}'"); + } + + internal async ValueTask ReadMultiDataResultCountAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + if (log.IsDebugEnabled) + Log("R: {0}", s); + if (c == '-') + throw CreateResponseError(s.StartsWith("ERR") ? 
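All of these Expect*/Read* helpers follow the same RESP framing: the first byte names the reply type and the rest of the line carries its payload. For reference (a protocol illustration, not code from this changeset):

```csharp
// RESP replies as read by the helpers above (each line terminated by \r\n):
//   +OK                            simple status  -> ExpectOkAsync / ExpectSuccessAsync
//   -ERR unknown command 'foo'     error          -> surfaced as RedisResponseException
//   :42                            integer        -> ReadLongAsync
//   $5  hello                      bulk string    -> ReadDataAsync / ParseSingleLineAsync
//   *2  $3 foo  $3 bar             multi-bulk     -> ReadMultiDataAsync => { "foo", "bar" }
```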
s.Substring(4) : s); + if (c == '*') + { + if (int.TryParse(s, out var count)) + { + return count; + } + } + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisNativeClient_Utils.cs b/src/ServiceStack.Redis/RedisNativeClient_Utils.cs index f48c3d8c..c8b5d19c 100644 --- a/src/ServiceStack.Redis/RedisNativeClient_Utils.cs +++ b/src/ServiceStack.Redis/RedisNativeClient_Utils.cs @@ -10,79 +10,267 @@ // using System; +using System.Collections.Generic; using System.Diagnostics; using System.Globalization; using System.IO; +using System.Linq; using System.Net; +using System.Net.Security; using System.Net.Sockets; +using System.Runtime.CompilerServices; +using System.Security.Authentication; using System.Security.Cryptography; +using System.Security.Cryptography.X509Certificates; using System.Text; +using System.Threading; +using System.Threading.Tasks; using ServiceStack.Text; +using ServiceStack.Text.Pools; namespace ServiceStack.Redis { public partial class RedisNativeClient { + private const string OK = "OK"; + private const string QUEUED = "QUEUED"; + private static Timer UsageTimer; + + private static int __requestsPerHour = 0; + public static int RequestsPerHour => __requestsPerHour; + + private const int Unknown = -1; + public static int ServerVersionNumber { get; set; } + + private static long IdCounter = 0; + public long ClientId { get; } = Interlocked.Increment(ref IdCounter); + + private string LogPrefix = string.Empty; + private void logDebug(object message) => log.Debug(LogPrefix + message); + private void logError(object message) => log.Error(LogPrefix + message); + private void logError(object message, Exception ex) => log.Error(LogPrefix + message, ex); + + public int AssertServerVersionNumber() + { + if (ServerVersionNumber == 0) + AssertConnectedSocket(); + + return ServerVersionNumber; + } + + public static void DisposeTimers() + { + if (UsageTimer == null) return; + try + { + UsageTimer.Dispose(); + } + finally + { + UsageTimer = null; + } + } + private void Connect() { - socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp) { + if (UsageTimer == null) + { + //Save Timer Resource for licensed usage + if (!LicenseUtils.HasLicensedFeature(LicenseFeature.Redis)) + { + UsageTimer = new Timer(delegate + { + __requestsPerHour = 0; + }, null, TimeSpan.FromMilliseconds(0), TimeSpan.FromHours(1)); + } + } + + socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp) + { SendTimeout = SendTimeout, ReceiveTimeout = ReceiveTimeout }; +#if DEBUG + // allow sync commands during connect (we're OK with sync for connect; the + // DebugAllowSync feature being used here only impacts tests) + var oldDebugAllowSync = DebugAllowSync; + DebugAllowSync = true; +#endif try { - if (ConnectTimeout == 0) + if (log.IsDebugEnabled) + { + var type = ConnectTimeout <= 0 ? "sync" : "async"; + logDebug($"Attempting {type} connection to '{Host}:{Port}' (SEND {SendTimeout}, RECV {ReceiveTimeout} Timeouts)..."); + } + + if (ConnectTimeout <= 0) { socket.Connect(Host, Port); } else { - var connectResult = socket.BeginConnect(Host, Port, null, null); + var connectResult = IPAddress.TryParse(Host, out var ip) + ? 
socket.BeginConnect(ip, Port, null, null) + : socket.BeginConnect(Host, Port, null, null); connectResult.AsyncWaitHandle.WaitOne(ConnectTimeout, true); } if (!socket.Connected) { + if (log.IsDebugEnabled) + logDebug($"Socket failed connect to '{Host}:{Port}' (ConnectTimeout {ConnectTimeout})"); + socket.Close(); socket = null; + DeactivatedAt = DateTime.UtcNow; return; } - Bstream = new BufferedStream(new NetworkStream(socket), 16 * 1024); - if (Password != null) - SendExpectSuccess(Commands.Auth, Password.ToUtf8Bytes()); + if (log.IsDebugEnabled) + logDebug($"Socket connected to '{Host}:{Port}'"); + + Stream networkStream = new NetworkStream(socket); + + if (Ssl) + { + if (Env.IsMono) + { + //Mono doesn't support EncryptionPolicy + sslStream = new SslStream(networkStream, + leaveInnerStreamOpen: false, + userCertificateValidationCallback: RedisConfig.CertificateValidationCallback, + userCertificateSelectionCallback: RedisConfig.CertificateSelectionCallback); + } + else + { +#if NETSTANDARD || NET472 + sslStream = new SslStream(networkStream, + leaveInnerStreamOpen: false, + userCertificateValidationCallback: RedisConfig.CertificateValidationCallback, + userCertificateSelectionCallback: RedisConfig.CertificateSelectionCallback, + encryptionPolicy: EncryptionPolicy.RequireEncryption); +#else + var ctor = typeof(SslStream).GetConstructors() + .First(x => x.GetParameters().Length == 5); + + var policyType = AssemblyUtils.FindType("System.Net.Security.EncryptionPolicy"); + var policyValue = Enum.Parse(policyType, "RequireEncryption"); + + sslStream = (SslStream)ctor.Invoke(new[] { + networkStream, + false, + RedisConfig.CertificateValidationCallback, + RedisConfig.CertificateSelectionCallback, + policyValue, + }); +#endif + } + +#if NETSTANDARD || NET472 + var task = sslStream.AuthenticateAsClientAsync(Host); + if (ConnectTimeout > 0) + { + task.Wait(ConnectTimeout); + } + else + { + task.Wait(); + } +#else + if (SslProtocols != null) + { + sslStream.AuthenticateAsClient(Host, new X509CertificateCollection(), + SslProtocols ?? System.Security.Authentication.SslProtocols.None, checkCertificateRevocation: true); + } + else + { + sslStream.AuthenticateAsClient(Host); + } +#endif - db = 0; - var ipEndpoint = socket.LocalEndPoint as IPEndPoint; - clientPort = ipEndpoint != null ? 
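The reworked Connect() above now performs the whole handshake in one place: optional TLS negotiation, then AUTH, SELECT of a non-zero database, CLIENT SETNAME, and finally the server-version probe. A sketch of the client properties that drive it (property names are taken from the surrounding code; host, port and values are illustrative):

```csharp
var secure = new RedisNativeClient("localhost", 6380)
{
    Ssl = true,                 // wrap the NetworkStream in an SslStream before anything is sent
    Password = "s3cret",        // -> AUTH on connect
    Client = "orders-service",  // -> CLIENT SETNAME on connect
};
secure.Db = 2;                  // -> SELECT 2 when the connection is (re)established
```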
ipEndpoint.Port : -1; + if (!sslStream.IsEncrypted) + throw new Exception($"Could not establish an encrypted connection to '{Host}:{Port}'"); + + networkStream = sslStream; + } + + bufferedReader = new BufferedReader(networkStream, 16 * 1024); + + if (!string.IsNullOrEmpty(Password)) + SendUnmanagedExpectSuccess(Commands.Auth, Password.ToUtf8Bytes()); + + if (db != 0) + SendUnmanagedExpectSuccess(Commands.Select, db.ToUtf8Bytes()); + + if (Client != null) + SendUnmanagedExpectSuccess(Commands.Client, Commands.SetName, Client.ToUtf8Bytes()); + + try + { + if (ServerVersionNumber == 0) + { + ServerVersionNumber = RedisConfig.AssumeServerVersion.GetValueOrDefault(0); + if (ServerVersionNumber <= 0) + { + var parts = ServerVersion.Split('.'); + var version = int.Parse(parts[0]) * 1000; + if (parts.Length > 1) + version += int.Parse(parts[1]) * 100; + if (parts.Length > 2) + version += int.Parse(parts[2]); + + ServerVersionNumber = version; + } + } + } + catch (Exception) + { + //Twemproxy doesn't support the INFO command so automatically closes the socket + //Fallback to ServerVersionNumber=Unknown then try re-connecting + ServerVersionNumber = Unknown; + Connect(); + return; + } + + clientPort = socket.LocalEndPoint is IPEndPoint ipEndpoint ? ipEndpoint.Port : -1; lastCommand = null; lastSocketException = null; LastConnectedAtTimestamp = Stopwatch.GetTimestamp(); + OnConnected(); + if (ConnectionFilter != null) - { ConnectionFilter(this); - } } - catch (SocketException ex) + catch (SocketException) { - if (socket != null) - socket.Close(); - socket = null; - - HadExceptions = true; - var throwEx = new RedisException("could not connect to redis Instance at " + Host + ":" + Port, ex); - log.Error(throwEx.Message, ex); - throw throwEx; + logError(ErrorConnect.Fmt(Host, Port)); + throw; + } + finally + { +#if DEBUG + DebugAllowSync = oldDebugAllowSync; +#endif } } + public static string ErrorConnect = "Could not connect to redis Instance at {0}:{1}"; + + public virtual void OnConnected() + { + } + protected string ReadLine() { - var sb = new StringBuilder(); + AssertNotDisposed(); + AssertNotAsyncOnly(); + + var sb = StringBuilderCache.Allocate(); int c; - while ((c = Bstream.ReadByte()) != -1) + while ((c = bufferedReader.ReadByte()) != -1) { if (c == '\r') continue; @@ -90,11 +278,47 @@ protected string ReadLine() break; sb.Append((char)c); } - return sb.ToString(); + return StringBuilderCache.ReturnAndFree(sb); + } + + public bool HasConnected => socket != null; + + public bool IsSocketConnected() + { + if (socket == null) + return false; + var part1 = socket.Poll(1000, SelectMode.SelectRead); + var part2 = socket.Available == 0; + return !(part1 & part2); } - private bool AssertConnectedSocket() + internal bool AssertConnectedSocket() { + try + { + TryConnectIfNeeded(); + var isConnected = socket != null; + return isConnected; + } + catch (SocketException ex) + { + logError(ErrorConnect.Fmt(Host, Port)); + + socket?.Close(); + + socket = null; + + DeactivatedAt = DateTime.UtcNow; + var message = Host + ":" + Port; + var throwEx = new RedisException(message, ex); + logError(throwEx.Message, ex); + throw throwEx; + } + } + + private bool TryConnectIfNeeded() + { + bool didConnect = false; if (LastConnectedAtTimestamp > 0) { var now = Stopwatch.GetTimestamp(); @@ -102,66 +326,71 @@ private bool AssertConnectedSocket() if (socket == null || (elapsedSecs > IdleTimeOutSecs && !socket.IsConnected())) { - return Reconnect(); + Reconnect(); + didConnect = true; } LastConnectedAtTimestamp = now; } if 
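The version handshake above packs the "major.minor.patch" ServerVersion string into a single comparable integer using the major*1000 + minor*100 + patch scheme shown in the diff. A small worked example of that packing (helper name is illustrative):

using System;

static class ServerVersionSketch
{
    // "2.8.24" -> 2*1000 + 8*100 + 24 = 2824, so "is server >= 2.6?" becomes number >= 2600
    public static int ToVersionNumber(string serverVersion)
    {
        var parts = serverVersion.Split('.');
        var version = int.Parse(parts[0]) * 1000;
        if (parts.Length > 1)
            version += int.Parse(parts[1]) * 100;
        if (parts.Length > 2)
            version += int.Parse(parts[2]);
        return version;
    }

    static void Main()
    {
        Console.WriteLine(ToVersionNumber("2.8.24")); // 2824
        Console.WriteLine(ToVersionNumber("6.2"));    // 6200
    }
}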
(socket == null) { - var previousDb = db; Connect(); - if (previousDb != DefaultDb) this.Db = previousDb; + didConnect = true; } - var isConnected = socket != null; - - return isConnected; + return didConnect; } private bool Reconnect() { - var previousDb = db; - SafeConnectionClose(); - Connect(); //sets db to 0 - - if (previousDb != DefaultDb) this.Db = previousDb; + Connect(); //sets db return socket != null; } - private bool HandleSocketException(SocketException ex) + private RedisResponseException CreateResponseError(string error) { - HadExceptions = true; - log.Error("SocketException: ", ex); + DeactivatedAt = DateTime.UtcNow; - lastSocketException = ex; + if (RedisConfig.EnableVerboseLogging) + { + var safeLastCommand = string.IsNullOrEmpty(Password) + ? lastCommand + : (lastCommand ?? "").Replace(Password, ""); - // timeout? - socket.Close(); - socket = null; + if (!string.IsNullOrEmpty(safeLastCommand)) + error = $"{error}, LastCommand:'{safeLastCommand}', srcPort:{clientPort}"; + } - return false; + var throwEx = new RedisResponseException(error); + logError(error); + return throwEx; } - private RedisResponseException CreateResponseError(string error) + private RedisRetryableException CreateNoMoreDataError() { - HadExceptions = true; - var throwEx = new RedisResponseException( - string.Format("{0}, sPort: {1}, LastCommand: {2}", - error, clientPort, lastCommand)); - log.Error(throwEx.Message); + Reconnect(); + return CreateRetryableResponseError("No more data"); + } + + private RedisRetryableException CreateRetryableResponseError(string error) + { + string safeLastCommand = string.IsNullOrEmpty(Password) ? lastCommand : (lastCommand ?? "").Replace(Password, ""); + + var throwEx = new RedisRetryableException( + $"[{DateTime.UtcNow:HH:mm:ss.fff}] {error}, sPort: {clientPort}, LastCommand: {safeLastCommand}"); + logError(throwEx.Message); throw throwEx; } - private RedisException CreateConnectionError() + private RedisException CreateConnectionError(Exception originalEx) { - HadExceptions = true; + DeactivatedAt = DateTime.UtcNow; var throwEx = new RedisException( - string.Format("Unable to Connect: sPort: {0}", - clientPort), lastSocketException); - log.Error(throwEx.Message); + $"[{DateTime.UtcNow:HH:mm:ss.fff}] Unable to Connect: sPort: {clientPort}{(originalEx != null ? ", Error: " + originalEx.Message + "\n" + originalEx.StackTrace : "")}", + originalEx ?? 
lastSocketException); + logError(throwEx.Message); throw throwEx; } @@ -183,31 +412,45 @@ private static byte[] GetCmdBytes(char cmdPrefix, int noOfLines) } /// - /// Command to set multuple binary safe arguments + /// Command to set multiple binary safe arguments /// /// /// - protected bool SendCommand(params byte[][] cmdWithBinaryArgs) + protected void WriteCommandToSendBuffer(params byte[][] cmdWithBinaryArgs) { - if (!AssertConnectedSocket()) return false; - - try + if (Pipeline == null && Transaction == null) { + Interlocked.Increment(ref __requestsPerHour); + if (__requestsPerHour % 20 == 0) + LicenseUtils.AssertValidUsage(LicenseFeature.Redis, QuotaType.RequestsPerHour, __requestsPerHour); + } + + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) CmdLog(cmdWithBinaryArgs); - //Total command lines count - WriteAllToSendBuffer(cmdWithBinaryArgs); + //Total command lines count + WriteAllToSendBuffer(cmdWithBinaryArgs); + } - //pipeline will handle flush, if pipelining is turned on - if (Pipeline == null) - FlushSendBuffer(); - } - catch (SocketException ex) + /// + /// Send command outside of managed Write Buffer + /// + /// + protected void SendUnmanagedExpectSuccess(params byte[][] cmdWithBinaryArgs) + { + var bytes = GetCmdBytes('*', cmdWithBinaryArgs.Length); + + foreach (var safeBinaryValue in cmdWithBinaryArgs) { - cmdBuffer.Clear(); - return HandleSocketException(ex); + bytes = bytes.Combine(GetCmdBytes('$', safeBinaryValue.Length), safeBinaryValue, endData); } - return true; + + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + logDebug("stream.Write: " + Encoding.UTF8.GetString(bytes, 0, Math.Min(bytes.Length, 50)).Replace("\r\n"," ").SafeSubstring(0,50)); + + SendDirectToSocket(new ArraySegment(bytes, 0, bytes.Length)); + + ExpectSuccess(); } public void WriteAllToSendBuffer(params byte[][] cmdWithBinaryArgs) @@ -222,7 +465,9 @@ public void WriteAllToSendBuffer(params byte[][] cmdWithBinaryArgs) } } - readonly System.Collections.Generic.IList> cmdBuffer = new System.Collections.Generic.List>(); + // trated as List rather than IList to avoid allocs during foreach + readonly List> cmdBuffer = new List>(); + byte[] currentBuffer = BufferPool.GetBuffer(); int currentBufferIndex; @@ -237,7 +482,7 @@ public void WriteToSendBuffer(byte[] cmdBytes) var bytesCopied = 0; while (bytesCopied < cmdBytes.Length) { - var copyOfBytes = BufferPool.GetBuffer(); + var copyOfBytes = BufferPool.GetBuffer(cmdBytes.Length); var bytesToCopy = Math.Min(cmdBytes.Length - bytesCopied, copyOfBytes.Length); Buffer.BlockCopy(cmdBytes, bytesCopied, copyOfBytes, 0, bytesToCopy); cmdBuffer.Add(new ArraySegment(copyOfBytes, 0, bytesToCopy)); @@ -247,7 +492,7 @@ public void WriteToSendBuffer(byte[] cmdBytes) private bool CouldAddToCurrentBuffer(byte[] cmdBytes) { - if (cmdBytes.Length + currentBufferIndex < BufferPool.BufferLength) + if (cmdBytes.Length + currentBufferIndex < RedisConfig.BufferLength) { Buffer.BlockCopy(cmdBytes, 0, currentBuffer, currentBufferIndex, cmdBytes.Length); currentBufferIndex += cmdBytes.Length; @@ -263,25 +508,87 @@ private void PushCurrentBuffer() currentBufferIndex = 0; } - public void FlushSendBuffer() + public Action OnBeforeFlush { get; set; } + + internal void FlushAndResetSendBuffer() + { + FlushSendBuffer(); + ResetSendBuffer(); + } + + internal void FlushSendBuffer() { if (currentBufferIndex > 0) PushCurrentBuffer(); - if (!Env.IsMono) + if (cmdBuffer.Count > 0) + { + if (OnBeforeFlush != null) + OnBeforeFlush(); + + if (!Env.IsMono && sslStream == 
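WriteAllToSendBuffer and SendUnmanagedExpectSuccess both frame a command in the Redis protocol: a '*' line carrying the argument count, then a '$' length line plus the raw bytes for each argument. A minimal sketch of that framing, separate from the pooled-buffer plumbing above (illustrative only):

using System;
using System.IO;
using System.Text;

static class RespFramingSketch
{
    // Serializes e.g. SET mykey hello as:
    // *3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$5\r\nhello\r\n
    public static byte[] FrameCommand(params byte[][] cmdWithBinaryArgs)
    {
        using var ms = new MemoryStream();
        void WriteLine(string s)
        {
            var bytes = Encoding.UTF8.GetBytes(s + "\r\n");
            ms.Write(bytes, 0, bytes.Length);
        }

        WriteLine("*" + cmdWithBinaryArgs.Length);      // argument count
        foreach (var arg in cmdWithBinaryArgs)
        {
            WriteLine("$" + arg.Length);                // byte length of this argument
            ms.Write(arg, 0, arg.Length);               // argument payload
            WriteLine(string.Empty);                    // trailing \r\n
        }
        return ms.ToArray();
    }

    static void Main()
    {
        var frame = FrameCommand(
            Encoding.UTF8.GetBytes("SET"),
            Encoding.UTF8.GetBytes("mykey"),
            Encoding.UTF8.GetBytes("hello"));
        Console.Write(Encoding.UTF8.GetString(frame));
    }
}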
null) + { + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + { + var sb = StringBuilderCache.Allocate(); + foreach (var cmd in cmdBuffer) + { + if (sb.Length > 50) + break; + + sb.Append(Encoding.UTF8.GetString(cmd.Array, cmd.Offset, cmd.Count)); + } + logDebug("socket.Send: " + StringBuilderCache.ReturnAndFree(sb.Replace("\r\n", " ")).SafeSubstring(0,50)); + } + + socket.Send(cmdBuffer); //Optimized for Windows + } + else + { + //Sending IList Throws 'Message to Large' SocketException in Mono + foreach (var segment in cmdBuffer) + { + SendDirectToSocket(segment); + } + } + } + } + + private void SendDirectToSocket(ArraySegment segment) + { + if (sslStream == null) { - socket.Send(cmdBuffer); //Optimized for Windows + socket.Send(segment.Array, segment.Offset, segment.Count, SocketFlags.None); } else { - //Sendling IList Throws 'Message to Large' SocketException in Mono - foreach (var segment in cmdBuffer) + sslStream.Write(segment.Array, segment.Offset, segment.Count); + } + } + + /// + /// Called before returning pooled client/socket + /// + internal void Activate(bool newClient=false) + { + if (!newClient) + { + //Drain any existing buffers + ResetSendBuffer(); + bufferedReader?.Reset(); + if (socket?.Available > 0) { - var buffer = segment.Array; - socket.Send(buffer, segment.Offset, segment.Count, SocketFlags.None); + logDebug($"Draining existing socket of {socket.Available} bytes"); + var buff = new byte[socket.Available]; + socket.Receive(buff, SocketFlags.None); } } - ResetSendBuffer(); + Active = true; + } + + internal void Deactivate() + { + Active = false; } /// @@ -298,174 +605,316 @@ public void ResetSendBuffer() } } - private int SafeReadByte() + [MethodImpl(MethodImplOptions.AggressiveInlining)] + void AssertNotDisposed() { - return Bstream.ReadByte(); + if (bufferedReader == null) + throw new ObjectDisposedException($"Redis Client {ClientId} is Disposed"); } - protected void SendExpectSuccess(params byte[][] cmdWithBinaryArgs) + private int SafeReadByte(string name) { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); + AssertNotDisposed(); + AssertNotAsyncOnly(); - if (Pipeline != null) - { - Pipeline.CompleteVoidQueuedCommand(ExpectSuccess); - return; - } - ExpectSuccess(); + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + logDebug(name + "()"); + + return bufferedReader.ReadByte(); } - protected int SendExpectInt(params byte[][] cmdWithBinaryArgs) + internal TrackThread? 
TrackThread; + + partial void AssertNotAsyncOnly([CallerMemberName] string caller = default); +#if DEBUG + public bool DebugAllowSync { get; set; } = true; + partial void AssertNotAsyncOnly(string caller) { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); + // for unit tests only; asserts that we're not meant to be in an async context + if (!DebugAllowSync) + throw new InvalidOperationException("Unexpected synchronous operation detected from '" + caller + "'"); + } +#endif - if (Pipeline != null) + + protected T SendReceive(byte[][] cmdWithBinaryArgs, + Func fn, + Action> completePipelineFn = null, + bool sendWithoutRead = false) + { + if (Pipeline is null) AssertNotAsyncOnly(); + if (TrackThread != null) { - Pipeline.CompleteIntQueuedCommand(ReadInt); - return default(int); + if (TrackThread.Value.ThreadId != Thread.CurrentThread.ManagedThreadId) + throw new InvalidAccessException(TrackThread.Value.ThreadId, TrackThread.Value.StackTrace); } - return ReadInt(); - } + + var i = 0; + var didWriteToBuffer = false; + Exception originalEx = null; - protected long SendExpectLong(params byte[][] cmdWithBinaryArgs) - { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); + var firstAttempt = DateTime.UtcNow; - if (Pipeline != null) + while (true) { - Pipeline.CompleteLongQueuedCommand(ReadLong); - return default(long); + try + { + if (TryConnectIfNeeded()) + didWriteToBuffer = false; + + if (socket == null) + throw new RedisRetryableException("Socket is not connected"); + + if (!didWriteToBuffer) //only write to buffer once + { + WriteCommandToSendBuffer(cmdWithBinaryArgs); + didWriteToBuffer = true; + } + + if (Pipeline == null) //pipeline will handle flush if in pipeline + { + FlushSendBuffer(); + } + else if (!sendWithoutRead) + { + if (completePipelineFn == null) + throw new NotSupportedException("Pipeline is not supported."); + + completePipelineFn(fn); + return default(T); + } + + var result = default(T); + if (fn != null) + result = fn(); + + if (Pipeline == null) + ResetSendBuffer(); + + if (i > 0) + Interlocked.Increment(ref RedisState.TotalRetrySuccess); + + Interlocked.Increment(ref RedisState.TotalCommandsSent); + + return result; + } + catch (Exception outerEx) + { + if (log.IsDebugEnabled) + logDebug("SendReceive Exception: " + outerEx.Message); + + var retryableEx = outerEx as RedisRetryableException; + if (retryableEx == null && outerEx is RedisException + || outerEx is LicenseException) + { + ResetSendBuffer(); + throw; + } + + var ex = retryableEx ?? GetRetryableException(outerEx); + if (ex == null) + throw CreateConnectionError(originalEx ?? 
outerEx); + + if (originalEx == null) + originalEx = ex; + + var retry = DateTime.UtcNow - firstAttempt < retryTimeout; + if (!retry) + { + if (Pipeline == null) + ResetSendBuffer(); + + Interlocked.Increment(ref RedisState.TotalRetryTimedout); + throw CreateRetryTimeoutException(retryTimeout, originalEx); + } + + Interlocked.Increment(ref RedisState.TotalRetryCount); + TaskUtils.Sleep(GetBackOffMultiplier(++i)); + } } - return ReadLong(); } - protected byte[] SendExpectData(params byte[][] cmdWithBinaryArgs) + private RedisException CreateRetryTimeoutException(TimeSpan retryTimeout, Exception originalEx) { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); + DeactivatedAt = DateTime.UtcNow; + var message = "Exceeded timeout of {0}".Fmt(retryTimeout); + logError(message); + return new RedisException(message, originalEx); + } - if (Pipeline != null) - { - Pipeline.CompleteBytesQueuedCommand(ReadData); + private Exception GetRetryableException(Exception outerEx) + { + // several stream commands wrap SocketException in IOException + var socketEx = outerEx.InnerException as SocketException + ?? outerEx as SocketException; + + if (socketEx == null) return null; - } - return ReadData(); + + logError("SocketException in SendReceive, retrying...", socketEx); + lastSocketException = socketEx; + + socket?.Close(); + + socket = null; + return socketEx; } - protected string SendExpectString(params byte[][] cmdWithBinaryArgs) + private static int GetBackOffMultiplier(int i) { - var bytes = SendExpectData(cmdWithBinaryArgs); - return bytes.FromUtf8Bytes(); + var nextTryMs = (2 ^ i) * RedisConfig.BackOffMultiplier; + return nextTryMs; } - protected double SendExpectDouble(params byte[][] cmdWithBinaryArgs) + protected void SendWithoutRead(params byte[][] cmdWithBinaryArgs) { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); + SendReceive(cmdWithBinaryArgs, null, null, sendWithoutRead: true); + } - if (Pipeline != null) - { - Pipeline.CompleteDoubleQueuedCommand(ReadDouble); - return Double.NaN; - } + protected void SendExpectSuccess(params byte[][] cmdWithBinaryArgs) + { + //Turn Action into Func Hack + var completePipelineFn = Pipeline != null + ? f => { Pipeline.CompleteVoidQueuedCommand(() => f()); } + : (Action>)null; - return ReadDouble(); + SendReceive(cmdWithBinaryArgs, ExpectSuccessFn, completePipelineFn); } - public double ReadDouble() + protected long SendExpectLong(params byte[][] cmdWithBinaryArgs) { - var bytes = ReadData(); - return (bytes == null) ? double.NaN : ParseDouble(bytes); + return SendReceive(cmdWithBinaryArgs, ReadLong, Pipeline != null ? Pipeline.CompleteLongQueuedCommand : (Action>)null); } - public static double ParseDouble(byte[] doubleBytes) + protected byte[] SendExpectData(params byte[][] cmdWithBinaryArgs) { - var doubleString = Encoding.UTF8.GetString(doubleBytes); - - double d; - double.TryParse(doubleString, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out d); + return SendReceive(cmdWithBinaryArgs, ReadData, Pipeline != null ? Pipeline.CompleteBytesQueuedCommand : (Action>)null); + } - return d; + protected double SendExpectDouble(params byte[][] cmdWithBinaryArgs) + { + return SendReceive(cmdWithBinaryArgs, ReadDouble, Pipeline != null ? Pipeline.CompleteDoubleQueuedCommand : (Action>)null); } protected string SendExpectCode(params byte[][] cmdWithBinaryArgs) { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); + return SendReceive(cmdWithBinaryArgs, ExpectCode, Pipeline != null ? 
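The SendReceive loop above retries retryable failures until a retry window elapses, sleeping a growing amount between attempts before surfacing the original error. A stripped-down sketch of that control flow (the operation, timeout and backoff values here are placeholders, not the client's internals):

using System;
using System.Threading;

static class RetryLoopSketch
{
    public static T Execute<T>(Func<T> operation, TimeSpan retryTimeout)
    {
        var firstAttempt = DateTime.UtcNow;
        var attempt = 0;
        while (true)
        {
            try
            {
                return operation();
            }
            catch (TimeoutException) // stand-in for a "retryable" error class
            {
                if (DateTime.UtcNow - firstAttempt >= retryTimeout)
                    throw; // exceeded the retry window: surface the failure

                // back off a little more on every failed attempt
                Thread.Sleep(TimeSpan.FromMilliseconds(10 * ++attempt));
            }
        }
    }
}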
Pipeline.CompleteStringQueuedCommand : (Action>)null); + } - if (Pipeline != null) - { - Pipeline.CompleteStringQueuedCommand(ExpectCode); - return null; - } + protected byte[][] SendExpectMultiData(params byte[][] cmdWithBinaryArgs) + { + return SendReceive(cmdWithBinaryArgs, ReadMultiData, Pipeline != null ? Pipeline.CompleteMultiBytesQueuedCommand : (Action>)null) + ?? TypeConstants.EmptyByteArrayArray; + } - return ExpectCode(); + protected object[] SendExpectDeeplyNestedMultiData(params byte[][] cmdWithBinaryArgs) + { + return SendReceive(cmdWithBinaryArgs, ReadDeeplyNestedMultiData); } - protected byte[][] SendExpectMultiData(params byte[][] cmdWithBinaryArgs) + protected RedisData SendExpectComplexResponse(params byte[][] cmdWithBinaryArgs) { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); + return SendReceive(cmdWithBinaryArgs, ReadComplexResponse, Pipeline != null ? Pipeline.CompleteRedisDataQueuedCommand : (Action>)null); + } - if (Pipeline != null) + protected List> SendExpectStringDictionaryList(params byte[][] cmdWithBinaryArgs) + { + var results = SendExpectComplexResponse(cmdWithBinaryArgs); + var to = new List>(); + foreach (var data in results.Children) { - Pipeline.CompleteMultiBytesQueuedCommand(ReadMultiData); - return new byte[0][]; + if (data.Children != null) + { + var map = ToDictionary(data); + to.Add(map); + } } - return ReadMultiData(); + return to; } - protected object[] SendExpectDeeplyNestedMultiData(params byte[][] cmdWithBinaryArgs) + private static Dictionary ToDictionary(RedisData data) { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); + string key = null; + var map = new Dictionary(); - if (Pipeline != null) + if (data.Children == null) + throw new ArgumentNullException("data.Children"); + + for (var i = 0; i < data.Children.Count; i++) { - throw new NotSupportedException("Pipeline is not supported."); + var bytes = data.Children[i].Data; + if (i % 2 == 0) + { + key = bytes.FromUtf8Bytes(); + } + else + { + if (key == null) + throw new RedisResponseException("key == null, i={0}, data.Children[i] = {1}".Fmt(i, data.Children[i].ToRedisText().Dump())); + + var val = bytes.FromUtf8Bytes(); + map[key] = val; + } } + return map; + } - return ReadDeeplyNestedMultiData(); + protected string SendExpectString(params byte[][] cmdWithBinaryArgs) + { + var bytes = SendExpectData(cmdWithBinaryArgs); + return bytes.FromUtf8Bytes(); } - [Conditional("DEBUG")] protected void Log(string fmt, params object[] args) { - log.DebugFormat("{0}", string.Format(fmt, args).Trim()); + if (!RedisConfig.EnableVerboseLogging) + return; + + log.DebugFormat(LogPrefix + "{0}", string.Format(fmt, args).Trim()); } - [Conditional("DEBUG")] protected void CmdLog(byte[][] args) { - var sb = new StringBuilder(); + var sb = StringBuilderCache.Allocate(); foreach (var arg in args) { + var strArg = arg.FromUtf8Bytes(); + if (strArg == Password) continue; + if (sb.Length > 0) sb.Append(" "); - sb.Append(arg.FromUtf8Bytes()); + sb.Append(strArg); + + if (sb.Length > 100) + break; } - this.lastCommand = sb.ToString(); + this.lastCommand = StringBuilderCache.ReturnAndFree(sb); if (this.lastCommand.Length > 100) { this.lastCommand = this.lastCommand.Substring(0, 100) + "..."; } - log.Debug("S: " + this.lastCommand); + logDebug("S: " + this.lastCommand); + } + + //Turn Action into Func Hack + protected long ExpectSuccessFn() + { + ExpectSuccess(); + return 0; } protected void ExpectSuccess() { - int c = SafeReadByte(); + int c = 
SafeReadByte(nameof(ExpectSuccess)); if (c == -1) - throw CreateResponseError("No more data"); + throw CreateNoMoreDataError(); var s = ReadLine(); - Log((char)c + s); + if (log.IsDebugEnabled) + Log((char)c + s); if (c == '-') throw CreateResponseError(s.StartsWith("ERR") && s.Length >= 4 ? s.Substring(4) : s); @@ -473,30 +922,32 @@ protected void ExpectSuccess() private void ExpectWord(string word) { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ExpectWord)); if (c == -1) - throw CreateResponseError("No more data"); + throw CreateNoMoreDataError(); var s = ReadLine(); - Log((char)c + s); + if (log.IsDebugEnabled) + Log((char)c + s); if (c == '-') throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); if (s != word) - throw CreateResponseError(string.Format("Expected '{0}' got '{1}'", word, s)); + throw CreateResponseError($"Expected '{word}' got '{s}'"); } private string ExpectCode() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ExpectCode)); if (c == -1) - throw CreateResponseError("No more data"); + throw CreateNoMoreDataError(); var s = ReadLine(); - Log((char)c + s); + if (log.IsDebugEnabled) + Log((char)c + s); if (c == '-') throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); @@ -506,45 +957,27 @@ private string ExpectCode() internal void ExpectOk() { - ExpectWord("OK"); + ExpectWord(OK); } internal void ExpectQueued() { - ExpectWord("QUEUED"); + ExpectWord(QUEUED); } - public int ReadInt() + public long ReadLong() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ReadLong)); if (c == -1) - throw CreateResponseError("No more data"); - - var s = ReadLine(); + throw CreateNoMoreDataError(); - Log("R: {0}", s); - - if (c == '-') - throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); - - if (c == ':' || c == '$')//really strange why ZRANK needs the '$' here - { - int i; - if (int.TryParse(s, out i)) - return i; - } - throw CreateResponseError("Unknown reply on integer response: " + c + s); + return ParseLong(c, ReadLine()); } - public long ReadLong() + private long ParseLong(int c, string s) { - int c = SafeReadByte(); - if (c == -1) - throw CreateResponseError("No more data"); - - var s = ReadLine(); - - Log("R: {0}", s); + if (log.IsDebugEnabled) + Log("R: {0}", s); if (c == '-') throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); @@ -555,7 +988,20 @@ public long ReadLong() if (long.TryParse(s, out i)) return i; } - throw CreateResponseError("Unknown reply on integer response: " + c + s); + throw CreateResponseError("Unknown reply on integer response: " + ((char)c) + s); // c here is the protocol prefix + } + + public double ReadDouble() + { + var bytes = ReadData(); + return (bytes == null) ? 
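ExpectSuccess, ExpectCode and ParseLong all key off the first byte of the reply: '+' is a status, '-' an error, ':' an integer. A compact sketch of that dispatch over an already-read reply line (a hypothetical helper, not the client's API, which reads the prefix byte separately):

using System;

static class ReplyLineSketch
{
    // line is a reply without the trailing \r\n, e.g. "+OK", "-ERR unknown command", ":42"
    public static object Interpret(string line)
    {
        char prefix = line[0];
        string body = line.Substring(1);

        switch (prefix)
        {
            case '+': return body;                 // simple status, e.g. OK / QUEUED
            case ':': return long.Parse(body);     // integer reply
            case '-':                              // error reply: strip a leading "ERR "
                var msg = body.StartsWith("ERR ") ? body.Substring(4) : body;
                throw new InvalidOperationException(msg);
            default:
                throw new NotSupportedException("Unexpected reply prefix: " + prefix);
        }
    }
}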
double.NaN : ParseDouble(bytes); + } + + public static double ParseDouble(byte[] doubleBytes) + { + var doubleString = Encoding.UTF8.GetString(doubleBytes); + double.TryParse(doubleString, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var d); + return d; } private byte[] ReadData() @@ -566,9 +1012,10 @@ private byte[] ReadData() private byte[] ParseSingleLine(string r) { - Log("R: {0}", r); + if (log.IsDebugEnabled) + Log("R: {0}", r); if (r.Length == 0) - throw CreateResponseError("Zero length respose"); + throw CreateResponseError("Zero length response"); char c = r[0]; if (c == '-') @@ -578,16 +1025,15 @@ private byte[] ParseSingleLine(string r) { if (r == "$-1") return null; - int count; - if (Int32.TryParse(r.Substring(1), out count)) + if (int.TryParse(r.Substring(1), out var count)) { var retbuf = new byte[count]; var offset = 0; while (count > 0) { - var readCount = Bstream.Read(retbuf, offset, count); + var readCount = bufferedReader.Read(retbuf, offset, count); if (readCount <= 0) throw CreateResponseError("Unexpected end of Stream"); @@ -595,7 +1041,7 @@ private byte[] ParseSingleLine(string r) count -= readCount; } - if (Bstream.ReadByte() != '\r' || Bstream.ReadByte() != '\n') + if (bufferedReader.ReadByte() != '\r' || bufferedReader.ReadByte() != '\n') throw CreateResponseError("Invalid termination"); return retbuf; @@ -603,7 +1049,7 @@ private byte[] ParseSingleLine(string r) throw CreateResponseError("Invalid length"); } - if (c == ':') + if (c == ':' || c == '+') { //match the return value return r.Substring(1).ToUtf8Bytes(); @@ -613,12 +1059,13 @@ private byte[] ParseSingleLine(string r) private byte[][] ReadMultiData() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ReadMultiData)); if (c == -1) - throw CreateResponseError("No more data"); + throw CreateNoMoreDataError(); var s = ReadLine(); - Log("R: {0}", s); + if (log.IsDebugEnabled) + Log("R: {0}", s); switch (c) { @@ -632,13 +1079,12 @@ private byte[][] ReadMultiData() throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); case '*': - int count; - if (int.TryParse(s, out count)) + if (int.TryParse(s, out var count)) { if (count == -1) { //redis is in an invalid state - return new byte[0][]; + return TypeConstants.EmptyByteArrayArray; } var result = new byte[count][]; @@ -651,22 +1097,24 @@ private byte[][] ReadMultiData() break; } - throw CreateResponseError("Unknown reply on multi-request: " + c + s); + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix } private object[] ReadDeeplyNestedMultiData() { - return (object[])ReadDeeplyNestedMultiDataItem(); + var result = ReadDeeplyNestedMultiDataItem(); + return (object[])result; } private object ReadDeeplyNestedMultiDataItem() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ReadDeeplyNestedMultiDataItem)); if (c == -1) - throw CreateResponseError("No more data"); + throw CreateNoMoreDataError(); var s = ReadLine(); - Log("R: {0}", s); + if (log.IsDebugEnabled) + Log("R: {0}", s); switch (c) { @@ -677,8 +1125,7 @@ private object ReadDeeplyNestedMultiDataItem() throw CreateResponseError(s.StartsWith("ERR") ? 
s.Substring(4) : s); case '*': - int count; - if (int.TryParse(s, out count)) + if (int.TryParse(s, out var count)) { var array = new object[count]; for (int i = 0; i < count; i++) @@ -694,36 +1141,77 @@ private object ReadDeeplyNestedMultiDataItem() return s; } - throw CreateResponseError("Unknown reply on multi-request: " + c + s); + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix + } + + internal RedisData ReadComplexResponse() + { + int c = SafeReadByte(nameof(ReadComplexResponse)); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = ReadLine(); + if (log.IsDebugEnabled) + Log("R: {0}", s); + + switch (c) + { + case '$': + return new RedisData + { + Data = ParseSingleLine(string.Concat(char.ToString((char)c), s)) + }; + + case '-': + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + case '*': + if (int.TryParse(s, out var count)) + { + var ret = new RedisData { Children = new List() }; + for (var i = 0; i < count; i++) + { + ret.Children.Add(ReadComplexResponse()); + } + + return ret; + } + break; + + default: + return new RedisData { Data = s.ToUtf8Bytes() }; + } + + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix } internal int ReadMultiDataResultCount() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ReadMultiDataResultCount)); if (c == -1) - throw CreateResponseError("No more data"); + throw CreateNoMoreDataError(); var s = ReadLine(); - Log("R: {0}", s); + if (log.IsDebugEnabled) + Log("R: {0}", s); if (c == '-') throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); if (c == '*') { - int count; - if (int.TryParse(s, out count)) + if (int.TryParse(s, out var count)) { return count; } } - throw CreateResponseError("Unknown reply on multi-request: " + c + s); + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix } private static void AssertListIdAndValue(string listId, byte[] value) { if (listId == null) - throw new ArgumentNullException("listId"); + throw new ArgumentNullException(nameof(listId)); if (value == null) - throw new ArgumentNullException("value"); + throw new ArgumentNullException(nameof(value)); } private static byte[][] MergeCommandWithKeysAndValues(byte[] cmd, byte[][] keys, byte[][] values) @@ -742,13 +1230,13 @@ private static byte[][] MergeCommandWithKeysAndValues(byte[][] firstParams, byte[][] keys, byte[][] values) { if (keys == null || keys.Length == 0) - throw new ArgumentNullException("keys"); + throw new ArgumentNullException(nameof(keys)); if (values == null || values.Length == 0) - throw new ArgumentNullException("values"); + throw new ArgumentNullException(nameof(values)); if (keys.Length != values.Length) throw new ArgumentException("The number of values must be equal to the number of keys"); - var keyValueStartIndex = (firstParams != null) ? firstParams.Length : 0; + var keyValueStartIndex = firstParams?.Length ?? 0; var keysAndValuesLength = keys.Length * 2 + keyValueStartIndex; var keysAndValues = new byte[keysAndValuesLength][]; @@ -803,7 +1291,7 @@ protected byte[][] ConvertToBytes(string[] keys) for (var i = 0; i < keys.Length; i++) { var key = keys[i]; - keyBytes[i] = key != null ? key.ToUtf8Bytes() : new byte[0]; + keyBytes[i] = key != null ? 
key.ToUtf8Bytes() : TypeConstants.EmptyByteArray; } return keyBytes; } @@ -811,9 +1299,9 @@ protected byte[][] ConvertToBytes(string[] keys) protected byte[][] MergeAndConvertToBytes(string[] keys, string[] args) { if (keys == null) - keys = new string[0]; + keys = TypeConstants.EmptyStringArray; if (args == null) - args = new string[0]; + args = TypeConstants.EmptyStringArray; var keysLength = keys.Length; var merged = new string[keysLength + args.Length]; @@ -825,28 +1313,28 @@ protected byte[][] MergeAndConvertToBytes(string[] keys, string[] args) return ConvertToBytes(merged); } - public int EvalInt(string luaBody, int numberKeysInArgs, params byte[][] keys) + public long EvalInt(string luaBody, int numberKeysInArgs, params byte[][] keys) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); - return SendExpectInt(cmdArgs); + return SendExpectLong(cmdArgs); } - public int EvalShaInt(string sha1, int numberKeysInArgs, params byte[][] keys) + public long EvalShaInt(string sha1, int numberKeysInArgs, params byte[][] keys) { if (sha1 == null) - throw new ArgumentNullException("sha1"); + throw new ArgumentNullException(nameof(sha1)); var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); - return SendExpectInt(cmdArgs); + return SendExpectLong(cmdArgs); } public string EvalStr(string luaBody, int numberKeysInArgs, params byte[][] keys) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectData(cmdArgs).FromUtf8Bytes(); @@ -855,7 +1343,7 @@ public string EvalStr(string luaBody, int numberKeysInArgs, params byte[][] keys public string EvalShaStr(string sha1, int numberKeysInArgs, params byte[][] keys) { if (sha1 == null) - throw new ArgumentNullException("sha1"); + throw new ArgumentNullException(nameof(sha1)); var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectData(cmdArgs).FromUtf8Bytes(); @@ -864,7 +1352,7 @@ public string EvalShaStr(string sha1, int numberKeysInArgs, params byte[][] keys public byte[][] Eval(string luaBody, int numberKeysInArgs, params byte[][] keys) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectMultiData(cmdArgs); @@ -873,26 +1361,43 @@ public byte[][] Eval(string luaBody, int numberKeysInArgs, params byte[][] keys) public byte[][] EvalSha(string sha1, int numberKeysInArgs, params byte[][] keys) { if (sha1 == null) - throw new ArgumentNullException("sha1"); + throw new ArgumentNullException(nameof(sha1)); var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectMultiData(cmdArgs); } + public RedisData EvalCommand(string luaBody, int numberKeysInArgs, params byte[][] keys) + { + if (luaBody == null) + throw new ArgumentNullException(nameof(luaBody)); + + var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); + return RawCommand(cmdArgs); + } + + public RedisData 
EvalShaCommand(string sha1, int numberKeysInArgs, params byte[][] keys) + { + if (sha1 == null) + throw new ArgumentNullException(nameof(sha1)); + + var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); + return RawCommand(cmdArgs); + } + public string CalculateSha1(string luaBody) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); byte[] buffer = Encoding.UTF8.GetBytes(luaBody); - var cryptoTransformSHA1 = new SHA1CryptoServiceProvider(); - return BitConverter.ToString(cryptoTransformSHA1.ComputeHash(buffer)).Replace("-", ""); + return BitConverter.ToString(buffer.ToSha1Hash()).Replace("-", ""); } public byte[] ScriptLoad(string luaBody) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); var cmdArgs = MergeCommandWithArgs(Commands.Script, Commands.Load, luaBody.ToUtf8Bytes()); return SendExpectData(cmdArgs); @@ -916,4 +1421,4 @@ public void ScriptKill() } -} \ No newline at end of file +} diff --git a/src/ServiceStack.Redis/RedisPipeline.cs b/src/ServiceStack.Redis/RedisPipeline.cs deleted file mode 100644 index 1169438e..00000000 --- a/src/ServiceStack.Redis/RedisPipeline.cs +++ /dev/null @@ -1,45 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; - -namespace ServiceStack.Redis -{ - public class RedisPipelineCommand - { - private readonly RedisNativeClient client; - private int cmdCount; - - public RedisPipelineCommand(RedisNativeClient client) - { - this.client = client; - } - - public void WriteCommand(params byte[][] cmdWithBinaryArgs) - { - client.WriteAllToSendBuffer(cmdWithBinaryArgs); - cmdCount++; - } - - public List ReadAllAsInts() - { - var results = new List(); - while (cmdCount-- > 0) - { - results.Add(client.ReadInt()); - } - - return results; - } - - public bool ReadAllAsIntsHaveSuccess() - { - var allResults = ReadAllAsInts(); - return allResults.All(x => x == RedisNativeClient.Success); - } - - public void Flush() - { - client.FlushSendBuffer(); - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisPubSubServer.cs b/src/ServiceStack.Redis/RedisPubSubServer.cs new file mode 100644 index 00000000..a1d4fd17 --- /dev/null +++ b/src/ServiceStack.Redis/RedisPubSubServer.cs @@ -0,0 +1,612 @@ +using System; +using System.Diagnostics; +using System.Threading; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + public class RedisPubSubServer : IRedisPubSubServer + { + private static ILog Log = LogManager.GetLogger(typeof(RedisPubSubServer)); + private DateTime serverTimeAtStart; + private Stopwatch startedAt; + + public TimeSpan? HeartbeatInterval = TimeSpan.FromSeconds(10); + public TimeSpan HeartbeatTimeout = TimeSpan.FromSeconds(30); + private long lastHeartbeatTicks; + private Timer heartbeatTimer; + + public Action OnInit { get; set; } + public Action OnStart { get; set; } + public Action OnHeartbeatSent { get; set; } + public Action OnHeartbeatReceived { get; set; } + public Action OnStop { get; set; } + public Action OnDispose { get; set; } + + /// + /// Callback fired on each message received, handle with (channel, msg) => ... 
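For orientation, a sketch of how this pub/sub server is typically wired up from the members declared here, assuming the usual (channel, msg) message callback and an IRedisClientsManager such as RedisManagerPool; the connection string and channel name are placeholder values:

using System;
using ServiceStack.Redis;

class PubSubServerUsageSketch
{
    static void Main()
    {
        var clientsManager = new RedisManagerPool("localhost:6379"); // assumed local redis
        var pubSubServer = new RedisPubSubServer(clientsManager, "app:events")
        {
            OnMessage = (channel, msg) => Console.WriteLine(channel + ": " + msg),
            OnError = ex => Console.WriteLine("ERROR: " + ex.Message),
        };

        pubSubServer.Start();   // spawns the background subscription thread
        Console.ReadLine();
        pubSubServer.Dispose(); // stops the thread and disposes the heartbeat timer
    }
}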
+ /// + public Action OnMessage { get; set; } + public Action OnMessageBytes { get; set; } + + public Action OnControlCommand { get; set; } + public Action OnUnSubscribe { get; set; } + public Action OnEvent { get; set; } + public Action OnError { get; set; } + public Action OnFailover { get; set; } + public bool IsSentinelSubscription { get; set; } + + readonly Random rand = new Random(Environment.TickCount); + + private int doOperation = Operation.NoOp; + + private long timesStarted = 0; + private long noOfErrors = 0; + private int noOfContinuousErrors = 0; + private string lastExMsg = null; + private int status; + private Thread bgThread; //Subscription controller thread + private long bgThreadCount = 0; + + private const int NO = 0; + private const int YES = 1; + + private int autoRestart = YES; + public bool AutoRestart + { + get => Interlocked.CompareExchange(ref autoRestart, 0, 0) == YES; + set => Interlocked.CompareExchange(ref autoRestart, value ? YES : NO, autoRestart); + } + + public DateTime CurrentServerTime => new DateTime(serverTimeAtStart.Ticks + startedAt.Elapsed.Ticks, DateTimeKind.Utc); + + public long BgThreadCount => Interlocked.CompareExchange(ref bgThreadCount, 0, 0); + + public const string AllChannelsWildCard = "*"; + public IRedisClientsManager ClientsManager { get; set; } + public string[] Channels { get; set; } + public string[] ChannelsMatching { get; set; } + public TimeSpan? WaitBeforeNextRestart { get; set; } + + public RedisPubSubServer(IRedisClientsManager clientsManager, params string[] channels) + { + this.ClientsManager = clientsManager; + this.Channels = channels; + startedAt = Stopwatch.StartNew(); + + var failoverHost = clientsManager as IRedisFailover; + failoverHost?.OnFailover.Add(HandleFailover); + } + + public IRedisPubSubServer Start() + { + AutoRestart = true; + + if (Interlocked.CompareExchange(ref status, 0, 0) == Status.Started) + { + //Start any stopped worker threads + OnStart?.Invoke(); + + return this; + } + if (Interlocked.CompareExchange(ref status, 0, 0) == Status.Disposed) + throw new ObjectDisposedException("RedisPubSubServer has been disposed"); + + //Only 1 thread allowed past + if (Interlocked.CompareExchange(ref status, Status.Starting, Status.Stopped) == Status.Stopped) //Should only be 1 thread past this point + { + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} Stopped] Start()> Stopped -> Starting"); + + var initErrors = 0; + bool hasInit = false; + while (!hasInit) + { + try + { + Init(); + hasInit = true; + } + catch (Exception ex) + { + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] Start().Init()> Exception: {ex.Message}"); + OnError?.Invoke(ex); + SleepBackOffMultiplier(initErrors++); + } + } + + try + { + SleepBackOffMultiplier(Interlocked.CompareExchange(ref noOfContinuousErrors, 0, 0)); + + OnStart?.Invoke(); + + //Don't kill us if we're the thread that's retrying to Start() after a failure. 
+ if (bgThread != Thread.CurrentThread) + { + KillBgThreadIfExists(); + + bgThread = new Thread(RunLoop) + { + IsBackground = true, + Name = "RedisPubSubServer " + Interlocked.Increment(ref bgThreadCount) + }; + bgThread.Start(); + if (Log.IsDebugEnabled) + Log.Debug("Started Background Thread: " + bgThread.Name); + } + else + { + if (Log.IsDebugEnabled) + Log.Debug("Retrying RunLoop() on Thread: " + bgThread.Name); + RunLoop(); + } + } + catch (Exception ex) + { + OnError?.Invoke(ex); + } + } + + return this; + } + + private void Init() + { + using (var redis = ClientsManager.GetReadOnlyClient()) + { + startedAt = Stopwatch.StartNew(); + serverTimeAtStart = IsSentinelSubscription + ? DateTime.UtcNow + : redis.GetServerTime(); + } + + DisposeHeartbeatTimer(); + + if (HeartbeatInterval != null) + { + heartbeatTimer = new Timer(SendHeartbeat, null, + TimeSpan.FromMilliseconds(0), HeartbeatInterval.GetValueOrDefault()); + } + + Interlocked.CompareExchange(ref lastHeartbeatTicks, DateTime.UtcNow.Ticks, lastHeartbeatTicks); + + OnInit?.Invoke(); + } + + void SendHeartbeat(object state) + { + var currentStatus = Interlocked.CompareExchange(ref status, 0, 0); + if (currentStatus != Status.Started) + return; + + if (DateTime.UtcNow - new DateTime(lastHeartbeatTicks) < HeartbeatInterval.GetValueOrDefault()) + return; + + OnHeartbeatSent?.Invoke(); + + NotifyAllSubscribers(ControlCommand.Pulse); + + if (DateTime.UtcNow - new DateTime(lastHeartbeatTicks) > HeartbeatTimeout) + { + currentStatus = Interlocked.CompareExchange(ref status, 0, 0); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {Status.GetStatus(currentStatus)}] SendHeartbeat()> Exceeded HeartbeatTimeout"); + if (currentStatus == Status.Started) + { + Restart(); + } + } + } + + void Pulse() + { + Interlocked.CompareExchange(ref lastHeartbeatTicks, DateTime.UtcNow.Ticks, lastHeartbeatTicks); + + OnHeartbeatReceived?.Invoke(); + } + + private void DisposeHeartbeatTimer() + { + if (heartbeatTimer == null) + return; + + try + { + if (Log.IsDebugEnabled) + Log.Debug("RedisPubServer.DisposeHeartbeatTimer()"); + + heartbeatTimer.Dispose(); + } + catch (Exception ex) + { + OnError?.Invoke(ex); + } + heartbeatTimer = null; + } + + private IRedisClient masterClient; + private void RunLoop() + { + if (Interlocked.CompareExchange(ref status, Status.Started, Status.Starting) != Status.Starting) return; + Interlocked.Increment(ref timesStarted); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} Started] RunLoop().Stop> Starting -> Started, timesStarted: {timesStarted}"); + + try + { + //RESET + while (Interlocked.CompareExchange(ref status, 0, 0) == Status.Started) + { + using var redis = ClientsManager.GetReadOnlyClient(); + masterClient = redis; + + //Record that we had a good run... 
+ Interlocked.CompareExchange(ref noOfContinuousErrors, 0, noOfContinuousErrors); + + using var subscription = redis.CreateSubscription(); + subscription.OnUnSubscribe = HandleUnSubscribe; + + if (OnMessageBytes != null) + { + bool IsCtrlMessage(byte[] msg) + { + if (msg.Length < 4) + return false; + return msg[0] == 'C' && msg[1] == 'T' && msg[0] == 'R' && msg[0] == 'L'; + } + + ((RedisSubscription)subscription).OnMessageBytes = (channel, msg) => { + if (IsCtrlMessage(msg)) + return; + + OnMessageBytes(channel, msg); + }; + } + + subscription.OnMessage = (channel, msg) => + { + if (string.IsNullOrEmpty(msg)) + return; + + var ctrlMsg = msg.LeftPart(':'); + if (ctrlMsg == ControlCommand.Control) + { + var op = Interlocked.CompareExchange(ref doOperation, Operation.NoOp, doOperation); + + var msgType = msg.IndexOf(':') >= 0 + ? msg.RightPart(':') + : null; + + OnControlCommand?.Invoke(msgType ?? Operation.GetName(op)); + + switch (op) + { + case Operation.Stop: + if (Log.IsDebugEnabled) + Log.Debug("Stop Command Issued"); + + var holdStatus = GetStatus(); + + Interlocked.CompareExchange(ref status, Status.Stopping, Status.Started); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {holdStatus}] RunLoop().Stop> Started -> Stopping"); + try + { + if (Log.IsDebugEnabled) + Log.Debug("UnSubscribe From All Channels..."); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] RunLoop().Stop> subscription.UnSubscribeFromAllChannels()"); + + // ReSharper disable once AccessToDisposedClosure + subscription.UnSubscribeFromAllChannels(); //Un block thread. + } + finally + { + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] RunLoop().Stop> Stopping -> Stopped"); + Interlocked.CompareExchange(ref status, Status.Stopped, Status.Stopping); + } + return; + + case Operation.Reset: + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] RunLoop().Reset> subscription.UnSubscribeFromAllChannels()"); + + // ReSharper disable once AccessToDisposedClosure + subscription.UnSubscribeFromAllChannels(); //Un block thread. 
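Control traffic shares the data channels: a message is either a plain payload or a "CTRL"/"CTRL:<command>" string that drives the server's state machine. A tiny sketch of that convention using plain string operations instead of the LeftPart/RightPart extension helpers:

using System;

static class ControlMessageSketch
{
    // Returns true when msg is a control message; command may be null for a bare "CTRL"
    public static bool TryParseControl(string msg, out string command)
    {
        command = null;
        var sep = msg.IndexOf(':');
        var head = sep >= 0 ? msg.Substring(0, sep) : msg;
        if (head != "CTRL")
            return false;

        command = sep >= 0 ? msg.Substring(sep + 1) : null;
        return true;
    }

    static void Main()
    {
        Console.WriteLine(TryParseControl("CTRL:PULSE", out var cmd) + " " + cmd); // True PULSE
        Console.WriteLine(TryParseControl("hello world", out _));                  // False
    }
}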
+ return; + } + + switch (msgType) + { + case ControlCommand.Pulse: + Pulse(); + break; + } + } + else + { + OnMessage(channel, msg); + } + }; + + //blocks thread + if (ChannelsMatching != null && ChannelsMatching.Length > 0) + subscription.SubscribeToChannelsMatching(ChannelsMatching); + else + subscription.SubscribeToChannels(Channels); + + masterClient = null; + } + + OnStop?.Invoke(); + } + catch (Exception ex) + { + lastExMsg = ex.Message; + Interlocked.Increment(ref noOfErrors); + Interlocked.Increment(ref noOfContinuousErrors); + + var holdStatus = GetStatus(); + + if (Interlocked.CompareExchange(ref status, Status.Stopped, Status.Started) != Status.Started) + Interlocked.CompareExchange(ref status, Status.Stopped, Status.Stopping); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {holdStatus}] RunLoop().Stop> Started|Stopping -> Stopped"); + + OnStop?.Invoke(); + + OnError?.Invoke(ex); + } + + if (AutoRestart && Interlocked.CompareExchange(ref status, 0, 0) != Status.Disposed) + { + if (WaitBeforeNextRestart != null) + TaskUtils.Sleep(WaitBeforeNextRestart.Value); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] RunLoop().AutoRestart> Start()"); + Start(); + } + } + + public void Stop() + { + Stop(shouldRestart:false); + } + + private void Stop(bool shouldRestart) + { + AutoRestart = shouldRestart; + + if (Interlocked.CompareExchange(ref status, 0, 0) == Status.Disposed) + throw new ObjectDisposedException("RedisPubSubServer has been disposed"); + + if (Interlocked.CompareExchange(ref status, Status.Stopping, Status.Started) == Status.Started) + { + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] Stop()> Started -> Stopping"); + + if (Log.IsDebugEnabled) + Log.Debug("Stopping RedisPubSubServer..."); + + //Unblock current bg thread by issuing StopCommand + SendControlCommand(Operation.Stop); + } + } + + private void SendControlCommand(int operation) + { + Interlocked.CompareExchange(ref doOperation, operation, doOperation); + NotifyAllSubscribers(); + } + + private void NotifyAllSubscribers(string commandType=null) + { + var msg = ControlCommand.Control; + if (commandType != null) + msg += ":" + commandType; + + try + { + using var redis = ClientsManager.GetClient(); + foreach (var channel in Channels) + { + redis.PublishMessage(channel, msg); + } + } + catch (Exception ex) + { + OnError?.Invoke(ex); + Log.WarnFormat("Could not send '{0}' message to bg thread: {1}", msg, ex.Message); + } + } + + private void HandleFailover(IRedisClientsManager clientsManager) + { + try + { + OnFailover?.Invoke(this); + + if (masterClient != null) + { + //New thread-safe client with same connection info as connected master + using var currentlySubscribedClient = ((RedisClient)masterClient).CloneClient(); + Interlocked.CompareExchange(ref doOperation, Operation.Reset, doOperation); + foreach (var channel in Channels) + { + currentlySubscribedClient.PublishMessage(channel, ControlCommand.Control); + } + } + else + { + Restart(); + } + } + catch (Exception ex) + { + OnError?.Invoke(ex); + Log.Warn("Error trying to UnSubscribeFromChannels in OnFailover. 
Restarting...", ex); + Restart(); + } + } + + void HandleUnSubscribe(string channel) + { + if (Log.IsDebugEnabled) + Log.Debug("OnUnSubscribe: " + channel); + + OnUnSubscribe?.Invoke(channel); + } + + public void Restart() + { + Stop(shouldRestart:true); + } + + private void KillBgThreadIfExists() + { + if (bgThread != null && bgThread.IsAlive) + { + //give it a small chance to die gracefully + if (!bgThread.Join(500)) + { +#if !NETCORE + //Ideally we shouldn't get here, but lets try our hardest to clean it up + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] KillBgThreadIfExists()> bgThread.Interrupt()"); + Log.Warn("Interrupting previous Background Thread: " + bgThread.Name); + bgThread.Interrupt(); + if (!bgThread.Join(TimeSpan.FromSeconds(3))) + { + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] KillBgThreadIfExists()> bgThread.Abort()"); + Log.Warn(bgThread.Name + " just wont die, so we're now aborting it..."); + bgThread.Abort(); + } +#endif + } + bgThread = null; + } + } + + private void SleepBackOffMultiplier(int continuousErrorsCount) + { + if (continuousErrorsCount == 0) return; + const int maxSleepMs = 60 * 1000; + + //exponential/random retry back-off. + var nextTry = Math.Min( + rand.Next((int)Math.Pow(continuousErrorsCount, 3), (int)Math.Pow(continuousErrorsCount + 1, 3) + 1), + maxSleepMs); + + if (Log.IsDebugEnabled) + Log.DebugFormat("Sleeping for {0}ms after {1} continuous errors", nextTry, continuousErrorsCount); + + TaskUtils.Sleep(nextTry); + } + + public static class Operation //dep-free copy of WorkerOperation + { + public const int NoOp = 0; + public const int Stop = 1; + public const int Reset = 2; + public const int Restart = 3; + + public static string GetName(int op) + { + switch (op) + { + case NoOp: + return "NoOp"; + case Stop: + return "Stop"; + case Reset: + return "Reset"; + case Restart: + return "Restart"; + default: + return null; + } + } + } + + public static class ControlCommand + { + public const string Control = "CTRL"; + public const string Pulse = "PULSE"; + } + + class Status //dep-free copy of WorkerStatus + { + public const int Disposed = -1; + public const int Stopped = 0; + public const int Stopping = 1; + public const int Starting = 2; + public const int Started = 3; + + public static string GetStatus(int status) + { + return status switch { + Disposed => nameof(Disposed), + Stopped => nameof(Stopped), + Stopping => nameof(Stopping), + Starting => nameof(Starting), + Started => nameof(Started), + _ => throw new NotSupportedException("Unknown status: " + status) + }; + } + } + + public string GetStatus() => Status.GetStatus(Interlocked.CompareExchange(ref status, 0, 0)); + + public string GetStatsDescription() + { + var sb = StringBuilderCache.Allocate(); + sb.AppendLine("==============="); + sb.AppendLine("Current Status: " + GetStatus()); + sb.AppendLine("Times Started: " + Interlocked.CompareExchange(ref timesStarted, 0, 0)); + sb.AppendLine("Num of Errors: " + Interlocked.CompareExchange(ref noOfErrors, 0, 0)); + sb.AppendLine("Num of Continuous Errors: " + Interlocked.CompareExchange(ref noOfContinuousErrors, 0, 0)); + sb.AppendLine("Last ErrorMsg: " + lastExMsg); + sb.AppendLine("==============="); + return StringBuilderCache.ReturnAndFree(sb); + } + + public virtual void Dispose() + { + if (Interlocked.CompareExchange(ref status, 0, 0) == Status.Disposed) + return; + + if (Log.IsDebugEnabled) + Log.Debug("RedisPubServer.Dispose()..."); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] 
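SleepBackOffMultiplier above randomizes the sleep between cubic bounds so repeatedly failing subscribers back off quickly but not in lock-step. A worked sketch of the same calculation (the example counts are illustrative):

using System;

static class BackOffSketch
{
    static readonly Random Rand = new Random(Environment.TickCount);

    // continuousErrorsCount = 1 -> sleep between 1ms and 8ms
    // continuousErrorsCount = 4 -> sleep between 64ms and 125ms, capped at 60s overall
    public static int NextSleepMs(int continuousErrorsCount)
    {
        if (continuousErrorsCount == 0) return 0;
        const int maxSleepMs = 60 * 1000;

        var lower = (int)Math.Pow(continuousErrorsCount, 3);
        var upper = (int)Math.Pow(continuousErrorsCount + 1, 3) + 1; // exclusive upper bound
        return Math.Min(Rand.Next(lower, upper), maxSleepMs);
    }
}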
Dispose()>"); + + Stop(); + + var holdStatus = GetStatus(); + + if (Interlocked.CompareExchange(ref status, Status.Disposed, Status.Stopped) != Status.Stopped) + Interlocked.CompareExchange(ref status, Status.Disposed, Status.Stopping); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {holdStatus}] Dispose()> -> Disposed"); + + try + { + OnDispose?.Invoke(); + } + catch (Exception ex) + { + Log.Error("Error OnDispose(): ", ex); + } + + try + { + Thread.Sleep(100); //give it a small chance to die gracefully + KillBgThreadIfExists(); + } + catch (Exception ex) + { + OnError?.Invoke(ex); + } + + DisposeHeartbeatTimer(); + } + } +} diff --git a/src/ServiceStack.Redis/RedisResolver.cs b/src/ServiceStack.Redis/RedisResolver.cs new file mode 100644 index 00000000..a49e0f01 --- /dev/null +++ b/src/ServiceStack.Redis/RedisResolver.cs @@ -0,0 +1,179 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + public class RedisResolver : IRedisResolver, IRedisResolverExtended + { + static ILog log = LogManager.GetLogger(typeof(RedisResolver)); + + public Func ClientFactory { get; set; } + + public int ReadWriteHostsCount { get; private set; } + public int ReadOnlyHostsCount { get; private set; } + + HashSet allHosts = new HashSet(); + + private RedisEndpoint[] masters; + private RedisEndpoint[] replicas; + + public RedisEndpoint[] Masters => masters; + + public RedisEndpoint[] Slaves => replicas; + + public RedisResolver() + : this(TypeConstants.EmptyArray, TypeConstants.EmptyArray) {} + + public RedisResolver(IEnumerable masters, IEnumerable replicas) + : this(masters.ToRedisEndPoints(), replicas.ToRedisEndPoints()){} + + public RedisResolver(IEnumerable masters, IEnumerable replicas) + { + ResetMasters(masters.ToList()); + ResetSlaves(replicas.ToList()); + ClientFactory = RedisConfig.ClientFactory; + } + + public virtual void ResetMasters(IEnumerable hosts) + { + ResetMasters(hosts.ToRedisEndPoints()); + } + + public virtual void ResetMasters(List newMasters) + { + if (newMasters == null || newMasters.Count == 0) + throw new Exception("Must provide at least 1 master"); + + masters = newMasters.ToArray(); + ReadWriteHostsCount = masters.Length; + newMasters.Each(x => allHosts.Add(x)); + + if (log.IsDebugEnabled) + log.Debug("New Redis Masters: " + string.Join(", ", masters.Map(x => x.GetHostString()))); + } + + public virtual void ResetSlaves(IEnumerable hosts) + { + ResetSlaves(hosts.ToRedisEndPoints()); + } + + public virtual void ResetSlaves(List newReplicas) + { + replicas = (newReplicas ?? 
new List()).ToArray(); + ReadOnlyHostsCount = replicas.Length; + newReplicas.Each(x => allHosts.Add(x)); + + if (log.IsDebugEnabled) + log.Debug("New Redis Replicas: " + string.Join(", ", replicas.Map(x => x.GetHostString()))); + } + + public virtual RedisClient CreateRedisClient(RedisEndpoint config, bool master) + { + var client = ClientFactory(config); + + if (master && RedisConfig.VerifyMasterConnections) + { + var firstAttempt = DateTime.UtcNow; + Exception firstEx = null; + var retryTimeSpan = TimeSpan.FromMilliseconds(config.RetryTimeout); + var i = 0; + while (DateTime.UtcNow - firstAttempt < retryTimeSpan) + { + try + { + client = GetValidMaster(client, config); + return client; + } + catch (Exception ex) + { + if (!RedisConfig.RetryReconnectOnFailedMasters) + throw; + + firstEx ??= ex; + ExecUtils.SleepBackOffMultiplier(++i); + client?.Dispose(); + client = ClientFactory(config); + } + } + throw new TimeoutException($"Could not resolve master instance within {config.RetryTimeout}ms RetryTimeout", firstEx); + } + + return client; + } + + protected RedisClient GetValidMaster(RedisClient client, RedisEndpoint config) + { + var role = client.GetServerRole(); + if (role != RedisServerRole.Master) + { + Interlocked.Increment(ref RedisState.TotalInvalidMasters); + log.Error("Redis Master Host '{0}' is {1}. Resetting allHosts...".Fmt(config.GetHostString(), role)); + var newMasters = new List(); + var newReplicas = new List(); + RedisClient masterClient = null; + foreach (var hostConfig in allHosts) + { + try + { + var testClient = ClientFactory(hostConfig); + testClient.ConnectTimeout = RedisConfig.HostLookupTimeoutMs; + var testRole = testClient.GetServerRole(); + switch (testRole) + { + case RedisServerRole.Master: + newMasters.Add(hostConfig); + if (masterClient == null) + masterClient = testClient; + break; + case RedisServerRole.Slave: + newReplicas.Add(hostConfig); + break; + } + + } + catch { /* skip */ } + } + + if (masterClient == null) + { + Interlocked.Increment(ref RedisState.TotalNoMastersFound); + var errorMsg = "No master found in: " + string.Join(", ", allHosts.Map(x => x.GetHostString())); + log.Error(errorMsg); + throw new InvalidDataException(errorMsg); + } + + ResetMasters(newMasters); + ResetSlaves(newReplicas); + return masterClient; + } + return client; + } + + public RedisEndpoint GetReadWriteHost(int desiredIndex) + { + return masters[desiredIndex % masters.Length]; + } + + public RedisEndpoint GetReadOnlyHost(int desiredIndex) + { + return ReadOnlyHostsCount > 0 + ? replicas[desiredIndex % replicas.Length] + : GetReadWriteHost(desiredIndex); + } + + public RedisClient CreateMasterClient(int desiredIndex) + { + return CreateRedisClient(GetReadWriteHost(desiredIndex), master: true); + } + + public RedisClient CreateSlaveClient(int desiredIndex) + { + return CreateRedisClient(GetReadOnlyHost(desiredIndex), master: false); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisResponseException.cs b/src/ServiceStack.Redis/RedisResponseException.cs index ef4ce166..c7dba956 100644 --- a/src/ServiceStack.Redis/RedisResponseException.cs +++ b/src/ServiceStack.Redis/RedisResponseException.cs @@ -1,13 +1,13 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. 
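GetReadWriteHost and GetReadOnlyHost pick an endpoint by wrapping the caller's index over the configured hosts, with read-only lookups falling back to masters when no replicas exist. A minimal sketch of that selection rule (endpoint type reduced to a string for brevity):

using System;

static class HostSelectionSketch
{
    public static string GetReadWriteHost(string[] masters, int desiredIndex)
        => masters[desiredIndex % masters.Length];

    public static string GetReadOnlyHost(string[] masters, string[] replicas, int desiredIndex)
        => replicas.Length > 0
            ? replicas[desiredIndex % replicas.Length]
            : GetReadWriteHost(masters, desiredIndex);

    static void Main()
    {
        var masters = new[] { "10.0.0.1:6379" };
        var replicas = new[] { "10.0.0.2:6379", "10.0.0.3:6379" };

        Console.WriteLine(GetReadOnlyHost(masters, replicas, 0));              // 10.0.0.2:6379
        Console.WriteLine(GetReadOnlyHost(masters, replicas, 1));              // 10.0.0.3:6379
        Console.WriteLine(GetReadOnlyHost(masters, Array.Empty<string>(), 5)); // falls back to the master
    }
}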
All Rights Reserved. // -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // namespace ServiceStack.Redis diff --git a/src/ServiceStack.Redis/RedisRetryableException.cs b/src/ServiceStack.Redis/RedisRetryableException.cs new file mode 100644 index 00000000..7155684a --- /dev/null +++ b/src/ServiceStack.Redis/RedisRetryableException.cs @@ -0,0 +1,18 @@ +namespace ServiceStack.Redis +{ + public class RedisRetryableException + : RedisException + { + public RedisRetryableException(string message) + : base(message) + { + } + + public RedisRetryableException(string message, string code) : base(message) + { + Code = code; + } + + public string Code { get; private set; } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisScripts.cs b/src/ServiceStack.Redis/RedisScripts.cs new file mode 100644 index 00000000..7def2bd9 --- /dev/null +++ b/src/ServiceStack.Redis/RedisScripts.cs @@ -0,0 +1,287 @@ +using System; +using System.Collections; +using System.Linq; +using System.Collections.Generic; +using ServiceStack.Script; + +namespace ServiceStack.Redis +{ + public class RedisSearchCursorResult + { + public int Cursor { get; set; } + public List Results { get; set; } + } + + public class RedisSearchResult + { + public string Id { get; set; } + public string Type { get; set; } + public long Ttl { get; set; } + public long Size { get; set; } + } + + [Obsolete("Use RedisScripts")] + public class TemplateRedisFilters : RedisScripts {} + + public class RedisScripts : ScriptMethods + { + private const string RedisConnection = "__redisConnection"; + + private IRedisClientsManager redisManager; + public IRedisClientsManager RedisManager + { + get => redisManager ?? (redisManager = Context.Container.Resolve()); + set => redisManager = value; + } + + T exec(Func fn, ScriptScopeContext scope, object options) + { + try + { + if ((options is Dictionary obj && obj.TryGetValue("connectionString", out var oRedisConn)) + || scope.PageResult.Args.TryGetValue(RedisConnection, out oRedisConn)) + { + using (var redis = new RedisClient((string)oRedisConn)) + { + return fn(redis); + } + } + + using (var redis = RedisManager.GetClient()) + { + return fn(redis); + } + } + catch (Exception ex) + { + throw new StopFilterExecutionException(scope, options, ex); + } + } + + public IgnoreResult useRedis(ScriptScopeContext scope, string redisConnection) + { + if (redisConnection == null) + scope.PageResult.Args.Remove(RedisConnection); + else + scope.PageResult.Args[RedisConnection] = redisConnection; + + return IgnoreResult.Value; + } + + static readonly Dictionary cmdArgCounts = new Dictionary(StringComparer.OrdinalIgnoreCase) { + { "SET", 3 } + }; + + List parseCommandString(string cmd) + { + var args = new List(); + var lastPos = 0; + for (var i = 0; i < cmd.Length; i++) + { + var c = cmd[i]; + if (c == '{' || c == '[') + { + break; //stop splitting args if value is complex type + } + if (c == ' ') + { + var arg = cmd.Substring(lastPos, i - lastPos); + args.Add(arg); + lastPos = i + 1; + + //if we've reached the command args count, capture the rest of the body as the last arg + if (cmdArgCounts.TryGetValue(args[0], out int argCount) && args.Count == argCount - 1) + break; + } + } + args.Add(cmd.Substring(lastPos)); + return args; + } + + object toObject(RedisText r) + { + if (r == null) + return null; + + if (r.Children != null && r.Children.Count > 0) + { + var to = new List(); + for (var i = 0; i < r.Children.Count; i++) + { + 
var child = r.Children[i]; + var value = child.Text ?? toObject(child); + to.Add(value); + } + return to; + } + return r.Text; + } + + public object redisCall(ScriptScopeContext scope, object redisCommand) => redisCall(scope, redisCommand, null); + public object redisCall(ScriptScopeContext scope, object redisCommand, object options) + { + if (redisCommand == null) + return null; + + List args; + if (redisCommand is string cmd) + { + if (string.IsNullOrEmpty(cmd)) + return null; + + args = parseCommandString(cmd); + } + else if (redisCommand is IEnumerable e && !(e is IDictionary)) + { + args = new List(); + foreach (var arg in e) + { + if (arg == null) continue; + args.Add(arg.ToString()); + } + } + else + throw new NotSupportedException($"redisCall expects a string or an object args but received a {redisCommand.GetType().Name} instead."); + + var objParams = args.Select(x => (object)x).ToArray(); + var redisText = exec(r => r.Custom(objParams), scope, options); + var result = toObject(redisText); + return result; + } + + public List redisSearchKeys(ScriptScopeContext scope, string query) => redisSearchKeys(scope, query, null); + public List redisSearchKeys(ScriptScopeContext scope, string query, object options) + { + var json = redisSearchKeysAsJson(scope, query, options); + const string noResult = "{\"cursor\":0,\"results\":{}}"; + if (json == noResult) + return new List(); + + var searchResults = json.FromJson(); + return searchResults.Results; + } + + public Dictionary redisInfo(ScriptScopeContext scope) => redisInfo(scope, null); + public Dictionary redisInfo(ScriptScopeContext scope, object options) => exec(r => r.Info, scope, options); + + public string redisConnectionString(ScriptScopeContext scope) => exec(r => $"{r.Host}:{r.Port}?db={r.Db}", scope, null); + + public Dictionary redisConnection(ScriptScopeContext scope) => exec(r => new Dictionary + { + { "host", r.Host }, + { "port", r.Port }, + { "db", r.Db }, + }, scope, null); + + public string redisToConnectionString(ScriptScopeContext scope, object connectionInfo) => redisToConnectionString(scope, connectionInfo, null); + public string redisToConnectionString(ScriptScopeContext scope, object connectionInfo, object options) + { + var connectionString = connectionInfo as string; + if (connectionString != null) + return connectionString; + + if (connectionInfo is IDictionary d) + { + var host = (d.TryGetValue("host", out object h) ? h as string : null) ?? "localhost"; + var port = d.TryGetValue("port", out object p) ? DynamicInt.Instance.ConvertFrom(p) : 6379; + var db = d.TryGetValue("db", out object oDb) ? DynamicInt.Instance.ConvertFrom(oDb) : 0; + + connectionString = $"{host}:{port}?db={db}"; + + if (d.TryGetValue("password", out object password)) + connectionString += "&password=" + password.ToString().UrlEncode(); + } + + return connectionString; + } + + public string redisChangeConnection(ScriptScopeContext scope, object newConnection) => redisChangeConnection(scope, newConnection, null); + public string redisChangeConnection(ScriptScopeContext scope, object newConnection, object options) + { + try + { + var connectionString = redisToConnectionString(scope, newConnection, options); + if (connectionString == null) + throw new NotSupportedException(nameof(redisChangeConnection) + " expects a String or an ObjectDictionary but received: " + (newConnection?.GetType().Name ?? 
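redisCall above tokenises a command string with parseCommandString, which stops splitting as soon as a '{' or '[' is seen, or once the registered arg count for the command is reached, so a JSON payload survives as the final argument. A condensed standalone sketch of that splitting behaviour; the class and method names are invented for illustration:

    using System;
    using System.Collections.Generic;

    static class CommandSplitSketch
    {
        static readonly Dictionary<string, int> cmdArgCounts =
            new Dictionary<string, int>(StringComparer.OrdinalIgnoreCase) { { "SET", 3 } };

        static List<string> Split(string cmd)
        {
            var args = new List<string>();
            var lastPos = 0;
            for (var i = 0; i < cmd.Length; i++)
            {
                var c = cmd[i];
                if (c == '{' || c == '[')
                    break;                                  // stop splitting when a complex value starts
                if (c == ' ')
                {
                    args.Add(cmd.Substring(lastPos, i - lastPos));
                    lastPos = i + 1;
                    // once the command's known arg count is reached, the rest is one argument
                    if (cmdArgCounts.TryGetValue(args[0], out var argCount) && args.Count == argCount - 1)
                        break;
                }
            }
            args.Add(cmd.Substring(lastPos));
            return args;
        }

        static void Main()
        {
            // Prints: SET | person:1 | {"name":"Kurt","age":27}
            Console.WriteLine(string.Join(" | ", Split("SET person:1 {\"name\":\"Kurt\",\"age\":27}")));
        }
    }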
"null")); + + using (var testConnection = new RedisClient(connectionString)) + { + testConnection.Ping(); + } + + ((IRedisFailover)RedisManager).FailoverTo(connectionString); + + return connectionString; + } + catch (Exception ex) + { + throw new StopFilterExecutionException(scope, options ?? newConnection as IDictionary, ex); + } + } + + public string redisSearchKeysAsJson(ScriptScopeContext scope, string query, object options) + { + if (string.IsNullOrEmpty(query)) + return null; + + try + { + var args = scope.AssertOptions(nameof(redisSearchKeys), options); + var limit = args.TryGetValue("limit", out object value) + ? value.ConvertTo() + : scope.GetValue("redis.search.limit") ?? 100; + + const string LuaScript = @" +local limit = tonumber(ARGV[2]) +local pattern = ARGV[1] +local cursor = tonumber(ARGV[3]) +local len = 0 +local keys = {} +repeat + local r = redis.call('scan', cursor, 'MATCH', pattern, 'COUNT', limit) + cursor = tonumber(r[1]) + for k,v in ipairs(r[2]) do + table.insert(keys, v) + len = len + 1 + if len == limit then break end + end +until cursor == 0 or len == limit +local cursorAttrs = {['cursor'] = cursor, ['results'] = {}} +if len == 0 then + return cjson.encode(cursorAttrs) +end + +local keyAttrs = {} +for i,key in ipairs(keys) do + local type = redis.call('type', key)['ok'] + local pttl = redis.call('pttl', key) + local size = 0 + if type == 'string' then + size = redis.call('strlen', key) + elseif type == 'list' then + size = redis.call('llen', key) + elseif type == 'set' then + size = redis.call('scard', key) + elseif type == 'zset' then + size = redis.call('zcard', key) + elseif type == 'hash' then + size = redis.call('hlen', key) + end + local attrs = {['id'] = key, ['type'] = type, ['ttl'] = pttl, ['size'] = size, ['foo'] = 'bar'} + table.insert(keyAttrs, attrs) +end +cursorAttrs['results'] = keyAttrs +return cjson.encode(cursorAttrs)"; + + var json = exec(r => r.ExecCachedLua(LuaScript, sha1 => + r.ExecLuaShaAsString(sha1, query, limit.ToString(), "0")), scope, options); + + return json; + } + catch (Exception ex) + { + throw new StopFilterExecutionException(scope, options, ex); + } + } + } +} diff --git a/src/ServiceStack.Redis/RedisSentinel.cs b/src/ServiceStack.Redis/RedisSentinel.cs new file mode 100644 index 00000000..bdac92c8 --- /dev/null +++ b/src/ServiceStack.Redis/RedisSentinel.cs @@ -0,0 +1,463 @@ +// +// Redis Sentinel will connect to a Redis Sentinel Instance and create an IRedisClientsManager based off of the first sentinel that returns data +// +// Upon failure of a sentinel, other sentinels will be attempted to be connected to +// Upon a s_down event, the RedisClientsManager will be failed over to the new set of masters/replicas +// + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + public class RedisSentinel : IRedisSentinel + { + protected static readonly ILog Log = LogManager.GetLogger(typeof(RedisSentinel)); + + public static string DefaultMasterName = "mymaster"; + public static string DefaultAddress = "127.0.0.1:26379"; + + private readonly object oLock = new object(); + private bool isDisposed = false; + + private readonly string masterName; + public string MasterName => masterName; + + private int failures = 0; + private int sentinelIndex = -1; + public List SentinelHosts { get; private set; } + internal RedisEndpoint[] SentinelEndpoints { get; 
private set; } + private RedisSentinelWorker worker; + private static int MaxFailures = 5; + + /// + /// Change to use a different IRedisClientsManager + /// + public Func RedisManagerFactory { get; set; } + + /// + /// Configure the Redis Connection String to use for a Redis Instance Host + /// + public Func HostFilter { get; set; } + + /// + /// Configure the Redis Connection String to use for a Redis Sentinel Host + /// + public Func SentinelHostFilter { get; set; } + + /// + /// The configured Redis Client Manager this Sentinel managers + /// + public IRedisClientsManager RedisManager { get; set; } + + /// + /// Fired when Sentinel fails over the Redis Client Manager to a new master + /// + public Action OnFailover { get; set; } + + /// + /// Fired when the Redis Sentinel Worker connection fails + /// + public Action OnWorkerError { get; set; } + + /// + /// Fired when the Sentinel worker receives a message from the Sentinel Subscription + /// + public Action OnSentinelMessageReceived { get; set; } + + /// + /// Map the internal IP's returned by Sentinels to its external IP + /// + public Dictionary IpAddressMap { get; set; } + + /// + /// Whether to routinely scan for other sentinel hosts (default true) + /// + public bool ScanForOtherSentinels { get; set; } = true; + + /// + /// What interval to scan for other sentinel hosts (default 10 mins) + /// + public TimeSpan RefreshSentinelHostsAfter { get; set; } = TimeSpan.FromMinutes(10); + private DateTime lastSentinelsRefresh; + + /// + /// How long to wait after failing before connecting to next redis instance (default 250ms) + /// + public TimeSpan WaitBetweenFailedHosts { get; set; } = TimeSpan.FromMilliseconds(250); + + /// + /// How long to retry connecting to hosts before throwing (default 60 secs) + /// + public TimeSpan MaxWaitBetweenFailedHosts { get; set; } = TimeSpan.FromSeconds(60); + + /// + /// How long to wait after consecutive failed connection attempts to master before forcing + /// a Sentinel to failover the current master (default 60 secs) + /// + public TimeSpan WaitBeforeForcingMasterFailover { get; set; } = TimeSpan.FromSeconds(60); + + /// + /// The Max Connection time for Sentinel Worker (default 250ms) + /// + public int SentinelWorkerConnectTimeoutMs { get; set; } = 250; + + /// + /// The Max TCP Socket Receive time for Sentinel Worker (default 250ms) + /// + public int SentinelWorkerReceiveTimeoutMs { get; set; } = 250; + + /// + /// The Max TCP Socket Send time for Sentinel Worker (default 250ms) + /// + public int SentinelWorkerSendTimeoutMs { get; set; } = 250; + + /// + /// Reset client connections when Sentinel reports redis instance is subjectively down (default true) + /// + public bool ResetWhenSubjectivelyDown { get; set; } = true; + + /// + /// Reset client connections when Sentinel reports redis instance is objectively down (default true) + /// + public bool ResetWhenObjectivelyDown { get; set; } = true; + + internal string DebugId => $""; + + public RedisSentinel(string sentinelHost = null, string masterName = null) + : this(new[] { sentinelHost ?? DefaultAddress }, masterName ?? DefaultMasterName) { } + + public RedisSentinel(IEnumerable sentinelHosts, string masterName = null) + { + this.SentinelHosts = sentinelHosts?.ToList(); + + if (SentinelHosts == null || SentinelHosts.Count == 0) + throw new ArgumentException("sentinels must have at least one entry"); + + this.masterName = masterName ?? 
DefaultMasterName; + IpAddressMap = new Dictionary(); + RedisManagerFactory = (masters, replicas) => new PooledRedisClientManager(masters, replicas); + } + + /// + /// Initialize Sentinel Subscription and Configure Redis ClientsManager + /// + public IRedisClientsManager Start() + { + lock (oLock) + { + for (int i = 0; i < SentinelHosts.Count; i++) + { + var parts = SentinelHosts[i].SplitOnLast(':'); + if (parts.Length == 1) + { + SentinelHosts[i] = parts[0] + ":" + RedisConfig.DefaultPortSentinel; + } + } + + if (ScanForOtherSentinels) + RefreshActiveSentinels(); + + SentinelEndpoints = SentinelHosts + .Map(x => x.ToRedisEndpoint(defaultPort: RedisConfig.DefaultPortSentinel)) + .ToArray(); + + var sentinelWorker = GetValidSentinelWorker(); + + if (this.RedisManager == null || sentinelWorker == null) + throw new Exception("Unable to resolve sentinels!"); + + return this.RedisManager; + } + } + + public List GetActiveSentinelHosts(IEnumerable sentinelHosts) + { + var activeSentinelHosts = new List(); + foreach (var sentinelHost in sentinelHosts.ToArray()) + { + try + { + if (Log.IsDebugEnabled) + Log.Debug("Connecting to all available Sentinels to discover Active Sentinel Hosts..."); + + var endpoint = sentinelHost.ToRedisEndpoint(defaultPort: RedisConfig.DefaultPortSentinel); + using (var sentinelWorker = new RedisSentinelWorker(this, endpoint)) + { + if (!activeSentinelHosts.Contains(sentinelHost)) + activeSentinelHosts.Add(sentinelHost); + + var activeHosts = sentinelWorker.GetSentinelHosts(MasterName); + foreach (var activeHost in activeHosts) + { + if (!activeSentinelHosts.Contains(activeHost)) + { + activeSentinelHosts.Add(SentinelHostFilter != null + ? SentinelHostFilter(activeHost) + : activeHost); + } + } + } + + if (Log.IsDebugEnabled) + Log.Debug("All active Sentinels Found: " + string.Join(", ", activeSentinelHosts)); + } + catch (Exception ex) + { + Log.Error("Could not get active Sentinels from: {0}".Fmt(sentinelHost), ex); + } + } + return activeSentinelHosts; + } + + public void RefreshActiveSentinels() + { + var activeHosts = GetActiveSentinelHosts(SentinelHosts); + if (activeHosts.Count == 0) return; + + lock (SentinelHosts) + { + lastSentinelsRefresh = DateTime.UtcNow; + + activeHosts.Each(x => + { + if (!SentinelHosts.Contains(x)) + SentinelHosts.Add(x); + }); + + SentinelEndpoints = SentinelHosts + .Map(x => x.ToRedisEndpoint(defaultPort: RedisConfig.DefaultPortSentinel)) + .ToArray(); + } + } + + internal string[] ConfigureHosts(IEnumerable hosts) + { + if (hosts == null) + return TypeConstants.EmptyStringArray; + + return HostFilter == null + ? 
hosts.ToArray() + : hosts.Map(HostFilter).ToArray(); + } + + public SentinelInfo ResetClients() + { + var sentinelInfo = GetSentinelInfo(); + + if (RedisManager == null) + { + if (Log.IsDebugEnabled) + Log.Debug($"Configuring initial Redis Clients: {sentinelInfo}"); + + RedisManager = CreateRedisManager(sentinelInfo); + } + else + { + if (Log.IsDebugEnabled) + Log.Debug($"Failing over to Redis Clients: {sentinelInfo}"); + + ((IRedisFailover)RedisManager).FailoverTo( + ConfigureHosts(sentinelInfo.RedisMasters), + ConfigureHosts(sentinelInfo.RedisSlaves)); + } + + return sentinelInfo; + } + + private IRedisClientsManager CreateRedisManager(SentinelInfo sentinelInfo) + { + var masters = ConfigureHosts(sentinelInfo.RedisMasters); + var replicas = ConfigureHosts(sentinelInfo.RedisSlaves); + var redisManager = RedisManagerFactory(masters, replicas); + + var hasRedisResolver = (IHasRedisResolver)redisManager; + hasRedisResolver.RedisResolver = new RedisSentinelResolver(this, masters, replicas); + + if (redisManager is IRedisFailover canFailover && this.OnFailover != null) + { + canFailover.OnFailover.Add(this.OnFailover); + } + return redisManager; + } + + public IRedisClientsManager GetRedisManager() => + RedisManager ??= CreateRedisManager(GetSentinelInfo()); + + private RedisSentinelWorker GetValidSentinelWorker() + { + if (isDisposed) + throw new ObjectDisposedException(GetType().Name); + + if (this.worker != null) + return this.worker; + + RedisException lastEx = null; + + while (this.worker == null && ShouldRetry()) + { + var step = 0; + try + { + this.worker = GetNextSentinel(); + step = 1; + GetRedisManager(); + + step = 2; + this.worker.BeginListeningForConfigurationChanges(); + this.failures = 0; //reset + return this.worker; + } + catch (RedisException ex) + { + if (Log.IsDebugEnabled) + { + var name = step switch { + 0 => "GetNextSentinel()", + 1 => "GetRedisManager()", + 2 => "BeginListeningForConfigurationChanges()", + _ => $"Step {step}", + }; + Log.Debug($"Failed to {name}: {ex.Message}"); + } + + if (OnWorkerError != null) + OnWorkerError(ex); + + lastEx = ex; + this.worker = null; + this.failures++; + Interlocked.Increment(ref RedisState.TotalFailedSentinelWorkers); + } + } + + this.failures = 0; //reset + TaskUtils.Sleep(WaitBetweenFailedHosts); + throw new RedisException("No Redis Sentinels were available", lastEx); + } + + public RedisEndpoint GetMaster() + { + var sentinelWorker = GetValidSentinelWorker(); + var host = sentinelWorker.GetMasterHost(masterName); + + if (ScanForOtherSentinels && DateTime.UtcNow - lastSentinelsRefresh > RefreshSentinelHostsAfter) + { + RefreshActiveSentinels(); + } + + return host != null + ? (HostFilter != null ? 
HostFilter(host) : host).ToRedisEndpoint() + : null; + } + + public List GetSlaves() + { + var sentinelWorker = GetValidSentinelWorker(); + var hosts = sentinelWorker.GetReplicaHosts(masterName); + return ConfigureHosts(hosts).Map(x => x.ToRedisEndpoint()); + } + + /// + /// Check if GetValidSentinel should try the next sentinel server + /// + /// + /// This will be true if the failures is less than either RedisSentinel.MaxFailures or the # of sentinels, whatever is greater + private bool ShouldRetry() + { + return this.failures < Math.Max(MaxFailures, this.SentinelEndpoints.Length); + } + + private RedisSentinelWorker GetNextSentinel() + { + RedisSentinelWorker disposeWorker = null; + + try + { + lock (oLock) + { + if (this.worker != null) + { + disposeWorker = this.worker; + this.worker = null; + } + + if (++sentinelIndex >= SentinelEndpoints.Length) + sentinelIndex = 0; + + if (Log.IsDebugEnabled) + Log.Debug($"Attempt to connect to next sentinel '{SentinelEndpoints[sentinelIndex]}'..."); + + var sentinelWorker = new RedisSentinelWorker(this, SentinelEndpoints[sentinelIndex]) + { + OnSentinelError = OnSentinelError + }; + + return sentinelWorker; + } + } + finally + { + disposeWorker?.Dispose(); + } + } + + private void OnSentinelError(Exception ex) + { + if (this.worker != null) + { + Log.Error("Error on existing SentinelWorker, reconnecting..."); + + if (OnWorkerError != null) + OnWorkerError(ex); + + this.worker = GetNextSentinel(); + this.worker.BeginListeningForConfigurationChanges(); + } + } + + public void ForceMasterFailover() + { + var sentinelWorker = GetValidSentinelWorker(); + sentinelWorker.ForceMasterFailover(masterName); + } + + public SentinelInfo GetSentinelInfo() + { + var sentinelWorker = GetValidSentinelWorker(); + return sentinelWorker.GetSentinelInfo(); + } + + public void Dispose() + { + this.isDisposed = true; + + new IDisposable[] { RedisManager, worker }.Dispose(); + } + } +} + +public class SentinelInfo +{ + public string MasterName { get; set; } + public string[] RedisMasters { get; set; } + public string[] RedisSlaves { get; set; } + + public SentinelInfo(string masterName, IEnumerable redisMasters, IEnumerable redisReplicas) + { + MasterName = masterName; + RedisMasters = redisMasters?.ToArray() ?? TypeConstants.EmptyStringArray; + RedisSlaves = redisReplicas?.ToArray() ?? 
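For context, a minimal end-to-end sketch of how the RedisSentinel class above is typically wired up; the sentinel addresses, the "mymaster" group name and the IP mapping are placeholders, and OnFailover is assumed to receive the re-configured clients manager:

    using System;
    using ServiceStack.Redis;

    class SentinelStartup
    {
        static void Main()
        {
            var sentinel = new RedisSentinel(
                new[] { "10.0.0.1:26379", "10.0.0.2:26379", "10.0.0.3:26379" },
                masterName: "mymaster")
            {
                // Map internal IPs reported by Sentinel to externally reachable addresses
                IpAddressMap = { ["10.0.0.10"] = "203.0.113.10" },
                OnFailover = manager => Console.WriteLine("Failed over to a new master/replica set"),
            };

            // Connects to the first responsive Sentinel and builds the clients manager
            // (PooledRedisClientManager unless RedisManagerFactory is overridden)
            var redisManager = sentinel.Start();

            using (var redis = redisManager.GetClient())
            {
                redis.SetValue("key", "value");
            }
        }
    }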
TypeConstants.EmptyStringArray; + } + + public override string ToString() + { + return $"{MasterName} primary: {string.Join(", ", RedisMasters)}, replicas: {string.Join(", ", RedisSlaves)}"; + } +} diff --git a/src/ServiceStack.Redis/RedisSentinelResolver.cs b/src/ServiceStack.Redis/RedisSentinelResolver.cs new file mode 100644 index 00000000..e0baf03a --- /dev/null +++ b/src/ServiceStack.Redis/RedisSentinelResolver.cs @@ -0,0 +1,240 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + public class RedisSentinelResolver : IRedisResolver, IRedisResolverExtended + { + static ILog log = LogManager.GetLogger(typeof(RedisResolver)); + + public Func ClientFactory { get; set; } + + public int ReadWriteHostsCount { get; private set; } + public int ReadOnlyHostsCount { get; private set; } + + HashSet allHosts = new HashSet(); + + private RedisSentinel sentinel; + + private RedisEndpoint[] masters; + private RedisEndpoint[] replicas; + + public RedisEndpoint[] Masters => masters; + public RedisEndpoint[] Slaves => replicas; + + public RedisSentinelResolver(RedisSentinel sentinel) + : this(sentinel, TypeConstants.EmptyArray, TypeConstants.EmptyArray) { } + + public RedisSentinelResolver(RedisSentinel sentinel, IEnumerable masters, IEnumerable replicas) + : this(sentinel, masters.ToRedisEndPoints(), replicas.ToRedisEndPoints()) { } + + public RedisSentinelResolver(RedisSentinel sentinel, IEnumerable masters, IEnumerable replicas) + { + this.sentinel = sentinel; + ResetMasters(masters.ToList()); + ResetSlaves(replicas.ToList()); + ClientFactory = RedisConfig.ClientFactory; + } + + public virtual void ResetMasters(IEnumerable hosts) + { + ResetMasters(hosts.ToRedisEndPoints()); + } + + public virtual void ResetMasters(List newMasters) + { + if (newMasters == null || newMasters.Count == 0) + throw new Exception("Must provide at least 1 master"); + + masters = newMasters.ToArray(); + ReadWriteHostsCount = masters.Length; + newMasters.Each(x => allHosts.Add(x)); + + if (log.IsDebugEnabled) + log.Debug("New Redis Masters: " + string.Join(", ", masters.Map(x => x.GetHostString()))); + } + + public virtual void ResetSlaves(IEnumerable hosts) + { + ResetSlaves(hosts.ToRedisEndPoints()); + } + + public virtual void ResetSlaves(List newReplicas) + { + replicas = (newReplicas ?? new List()).ToArray(); + ReadOnlyHostsCount = replicas.Length; + newReplicas.Each(x => allHosts.Add(x)); + + if (log.IsDebugEnabled) + log.Debug("New Redis Replicas: " + string.Join(", ", replicas.Map(x => x.GetHostString()))); + } + + public RedisEndpoint GetReadWriteHost(int desiredIndex) + { + return sentinel.GetMaster() ?? masters[desiredIndex % masters.Length]; + } + + public RedisEndpoint GetReadOnlyHost(int desiredIndex) + { + var replicaEndpoints = sentinel.GetSlaves(); + if (replicaEndpoints.Count > 0) + return replicaEndpoints[desiredIndex % replicaEndpoints.Count]; + + return ReadOnlyHostsCount > 0 + ? 
replicas[desiredIndex % replicas.Length] + : GetReadWriteHost(desiredIndex); + } + + public RedisClient CreateMasterClient(int desiredIndex) + { + return CreateRedisClient(GetReadWriteHost(desiredIndex), master: true); + } + + public RedisClient CreateSlaveClient(int desiredIndex) + { + return CreateRedisClient(GetReadOnlyHost(desiredIndex), master: false); + } + + object oLock = new object(); + private string lastInvalidMasterHost = null; + private long lastValidMasterTicks = DateTime.UtcNow.Ticks; + + private DateTime lastValidMasterFromSentinelAt + { + get => new DateTime(Interlocked.Read(ref lastValidMasterTicks), DateTimeKind.Utc); + set => Interlocked.Exchange(ref lastValidMasterTicks, value.Ticks); + } + + public virtual RedisClient CreateRedisClient(RedisEndpoint config, bool master) + { + var client = ClientFactory(config); + if (master) + { + var role = RedisServerRole.Unknown; + try + { + role = client.GetServerRole(); + if (role == RedisServerRole.Master) + { + lastValidMasterFromSentinelAt = DateTime.UtcNow; + return client; + } + } + catch (Exception ex) + { + Interlocked.Increment(ref RedisState.TotalInvalidMasters); + + if (client.GetHostString() == lastInvalidMasterHost) + { + lock (oLock) + { + if (DateTime.UtcNow - lastValidMasterFromSentinelAt > sentinel.WaitBeforeForcingMasterFailover) + { + lastInvalidMasterHost = null; + lastValidMasterFromSentinelAt = DateTime.UtcNow; + + log.Error("Valid master was not found at '{0}' within '{1}'. Sending SENTINEL failover...".Fmt( + client.GetHostString(), sentinel.WaitBeforeForcingMasterFailover), ex); + + Interlocked.Increment(ref RedisState.TotalForcedMasterFailovers); + + sentinel.ForceMasterFailover(); + TaskUtils.Sleep(sentinel.WaitBetweenFailedHosts); + role = client.GetServerRole(); + } + } + } + else + { + lastInvalidMasterHost = client.GetHostString(); + } + } + + if (role != RedisServerRole.Master && RedisConfig.VerifyMasterConnections) + { + try + { + var stopwatch = Stopwatch.StartNew(); + while (true) + { + try + { + var masterConfig = sentinel.GetMaster(); + var masterClient = ClientFactory(masterConfig); + masterClient.ConnectTimeout = sentinel.SentinelWorkerConnectTimeoutMs; + + var masterRole = masterClient.GetServerRole(); + if (masterRole == RedisServerRole.Master) + { + lastValidMasterFromSentinelAt = DateTime.UtcNow; + return masterClient; + } + else + { + Interlocked.Increment(ref RedisState.TotalInvalidMasters); + } + } + catch { /* Ignore errors until MaxWait */ } + + if (stopwatch.Elapsed > sentinel.MaxWaitBetweenFailedHosts) + throw new TimeoutException("Max Wait Between Sentinel Lookups Elapsed: {0}" + .Fmt(sentinel.MaxWaitBetweenFailedHosts.ToString())); + + TaskUtils.Sleep(sentinel.WaitBetweenFailedHosts); + } + } + catch (Exception ex) + { + log.Error("Redis Master Host '{0}' is {1}. 
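The forced-failover branch above only triggers after repeated invalid-master errors against the same host for longer than WaitBeforeForcingMasterFailover; those windows are configured on the owning RedisSentinel (a fragment with illustrative values):

    var sentinel = new RedisSentinel(new[] { "127.0.0.1:26379" }, "mymaster")
    {
        WaitBeforeForcingMasterFailover = TimeSpan.FromSeconds(30), // issue SENTINEL failover after 30s without a valid master
        WaitBetweenFailedHosts = TimeSpan.FromMilliseconds(250),    // pause between master lookups
        MaxWaitBetweenFailedHosts = TimeSpan.FromSeconds(60),       // throw TimeoutException once this is exceeded
    };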
Resetting allHosts...".Fmt(config.GetHostString(), role), ex); + + var newMasters = new List(); + var newReplicas = new List(); + RedisClient masterClient = null; + foreach (var hostConfig in allHosts) + { + try + { + var testClient = ClientFactory(hostConfig); + testClient.ConnectTimeout = RedisConfig.HostLookupTimeoutMs; + var testRole = testClient.GetServerRole(); + switch (testRole) + { + case RedisServerRole.Master: + newMasters.Add(hostConfig); + if (masterClient == null) + masterClient = testClient; + break; + case RedisServerRole.Slave: + newReplicas.Add(hostConfig); + break; + } + + } + catch { /* skip past invalid master connections */ } + } + + if (masterClient == null) + { + Interlocked.Increment(ref RedisState.TotalNoMastersFound); + var errorMsg = "No master found in: " + string.Join(", ", allHosts.Map(x => x.GetHostString())); + log.Error(errorMsg); + throw new Exception(errorMsg); + } + + ResetMasters(newMasters); + ResetSlaves(newReplicas); + return masterClient; + } + } + } + + return client; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisSentinelWorker.cs b/src/ServiceStack.Redis/RedisSentinelWorker.cs new file mode 100644 index 00000000..978ebf37 --- /dev/null +++ b/src/ServiceStack.Redis/RedisSentinelWorker.cs @@ -0,0 +1,215 @@ +using System.Threading; +using ServiceStack.Logging; +using System; +using System.Collections.Generic; + +namespace ServiceStack.Redis +{ + internal class RedisSentinelWorker : IDisposable + { + protected static readonly ILog Log = LogManager.GetLogger(typeof(RedisSentinelWorker)); + + static int IdCounter = 0; + public int Id { get; } + + private readonly object oLock = new(); + + private readonly RedisEndpoint sentinelEndpoint; + private readonly RedisSentinel sentinel; + private readonly RedisClient sentinelClient; + private RedisPubSubServer sentinelPubSub; + + public Action OnSentinelError; + + public RedisSentinelWorker(RedisSentinel sentinel, RedisEndpoint sentinelEndpoint) + { + this.Id = Interlocked.Increment(ref IdCounter); + this.sentinel = sentinel; + this.sentinelEndpoint = sentinelEndpoint; + this.sentinelClient = new RedisClient(sentinelEndpoint) { + Db = 0, //Sentinel Servers doesn't support DB, reset to 0 + ConnectTimeout = sentinel.SentinelWorkerConnectTimeoutMs, + ReceiveTimeout = sentinel.SentinelWorkerReceiveTimeoutMs, + SendTimeout = sentinel.SentinelWorkerSendTimeoutMs, + }; + + if (Log.IsDebugEnabled) + Log.Debug($"Set up Redis Sentinel on {sentinelEndpoint}"); + } + + /// + /// Event that is fired when the sentinel subscription raises an event + /// + /// + /// + private void SentinelMessageReceived(string channel, string message) + { + if (Log.IsDebugEnabled) + Log.Debug($"Received '{channel}' on channel '{message}' from Sentinel"); + + // {+|-}sdown is the event for server coming up or down + var c = channel.ToLower(); + var isSubjectivelyDown = c.Contains("sdown"); + if (isSubjectivelyDown) + Interlocked.Increment(ref RedisState.TotalSubjectiveServersDown); + + var isObjectivelyDown = c.Contains("odown"); + if (isObjectivelyDown) + Interlocked.Increment(ref RedisState.TotalObjectiveServersDown); + + if (c == "+failover-end" + || c == "+switch-master" + || (sentinel.ResetWhenSubjectivelyDown && isSubjectivelyDown) + || (sentinel.ResetWhenObjectivelyDown && isObjectivelyDown)) + { + if (Log.IsDebugEnabled) + Log.Debug($"Sentinel detected server down/up '{channel}' with message: {message}"); + + sentinel.ResetClients(); + } + + if (sentinel.OnSentinelMessageReceived != null) + 
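Every pub/sub event received by the worker is also forwarded to the hooks on RedisSentinel, which is the intended extension point for logging or metrics; a small fragment using the delegates defined above:

    sentinel.OnSentinelMessageReceived = (channel, message) =>
        Console.WriteLine($"sentinel event {channel}: {message}");

    sentinel.OnWorkerError = ex =>
        Console.WriteLine("sentinel worker error: " + ex.Message);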
sentinel.OnSentinelMessageReceived(channel, message); + } + + internal SentinelInfo GetSentinelInfo() + { + var masterHost = GetMasterHostInternal(sentinel.MasterName); + if (masterHost == null) + throw new RedisException("Redis Sentinel is reporting no master is available"); + + var sentinelInfo = new SentinelInfo( + sentinel.MasterName, + new[] { masterHost }, + GetReplicaHosts(sentinel.MasterName)); + + return sentinelInfo; + } + + internal string GetMasterHost(string masterName) + { + try + { + return GetMasterHostInternal(masterName); + } + catch (Exception ex) + { + if (OnSentinelError != null) + OnSentinelError(ex); + + return null; + } + } + + private string GetMasterHostInternal(string masterName) + { + List masterInfo; + lock (oLock) + masterInfo = sentinelClient.SentinelGetMasterAddrByName(masterName); + + return masterInfo.Count > 0 + ? SanitizeMasterConfig(masterInfo) + : null; + } + + private string SanitizeMasterConfig(List masterInfo) + { + var ip = masterInfo[0]; + var port = masterInfo[1]; + + if (sentinel.IpAddressMap.TryGetValue(ip, out var aliasIp)) + ip = aliasIp; + + return $"{ip}:{port}"; + } + + internal List GetSentinelHosts(string masterName) + { + List> sentinelSentinels; + lock (oLock) + sentinelSentinels = this.sentinelClient.SentinelSentinels(sentinel.MasterName); + + return SanitizeHostsConfig(sentinelSentinels); + } + + internal List GetReplicaHosts(string masterName) + { + List> sentinelReplicas; + + lock (oLock) + sentinelReplicas = sentinelClient.SentinelSlaves(sentinel.MasterName); + + return SanitizeHostsConfig(sentinelReplicas); + } + + private List SanitizeHostsConfig(IEnumerable> replicas) + { + var servers = new List(); + foreach (var replica in replicas) + { + replica.TryGetValue("flags", out var flags); + replica.TryGetValue("ip", out var ip); + replica.TryGetValue("port", out var port); + + if (sentinel.IpAddressMap.TryGetValue(ip, out var aliasIp)) + ip = aliasIp; + else if (ip == "127.0.0.1") + ip = this.sentinelClient.Host; + + if (ip != null && port != null && !flags.Contains("s_down") && !flags.Contains("o_down")) + servers.Add($"{ip}:{port}"); + } + return servers; + } + + public void BeginListeningForConfigurationChanges() + { + try + { + lock (oLock) + { + if (this.sentinelPubSub == null) + { + var currentSentinelHost = new[] {sentinelEndpoint}; + var sentinelManager = new BasicRedisClientManager(currentSentinelHost, currentSentinelHost) + { + //Use BasicRedisResolver which doesn't validate non-Master Sentinel instances + RedisResolver = new BasicRedisResolver(currentSentinelHost, currentSentinelHost) + }; + + if (Log.IsDebugEnabled) + Log.Debug($"Starting subscription to {sentinel.SentinelHosts.ToArray()}, replicas: {sentinel.SentinelHosts.ToArray()}..."); + + this.sentinelPubSub = new RedisPubSubServer(sentinelManager) + { + HeartbeatInterval = null, + IsSentinelSubscription = true, + ChannelsMatching = new[] { RedisPubSubServer.AllChannelsWildCard }, + OnMessage = SentinelMessageReceived + }; + } + } + + this.sentinelPubSub.Start(); + } + catch (Exception ex) + { + Log.Error($"Error Subscribing to Redis Channel on {sentinelClient.Host}:{sentinelClient.Port}", ex); + + if (OnSentinelError != null) + OnSentinelError(ex); + } + } + + public void ForceMasterFailover(string masterName) + { + lock (oLock) + this.sentinelClient.SentinelFailover(masterName); + } + + public void Dispose() + { + new IDisposable[] { this.sentinelClient, sentinelPubSub }.Dispose(Log); + } + } +} diff --git a/src/ServiceStack.Redis/RedisState.cs 
b/src/ServiceStack.Redis/RedisState.cs new file mode 100644 index 00000000..3c6c7b13 --- /dev/null +++ b/src/ServiceStack.Redis/RedisState.cs @@ -0,0 +1,104 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Threading; +using ServiceStack.Logging; + +namespace ServiceStack.Redis +{ + /// + /// Don't immediately kill connections of active clients after failover to give them a chance to dispose gracefully. + /// Deactivating clients are automatically cleared from the pool. + /// + internal static class RedisState + { + private static ILog log = LogManager.GetLogger(typeof(RedisState)); + + internal static long TotalCommandsSent = 0; + internal static long TotalFailovers = 0; + internal static long TotalDeactivatedClients = 0; + internal static long TotalFailedSentinelWorkers = 0; + internal static long TotalForcedMasterFailovers = 0; + internal static long TotalInvalidMasters = 0; + internal static long TotalNoMastersFound = 0; + internal static long TotalClientsCreated = 0; + internal static long TotalClientsCreatedOutsidePool = 0; + internal static long TotalSubjectiveServersDown = 0; + internal static long TotalObjectiveServersDown = 0; + internal static long TotalRetryCount = 0; + internal static long TotalRetrySuccess = 0; + internal static long TotalRetryTimedout = 0; + + internal static readonly ConcurrentDictionary DeactivatedClients = new ConcurrentDictionary(); + + internal static void DeactivateClient(RedisClient client) + { + Interlocked.Increment(ref TotalDeactivatedClients); + + if (RedisConfig.DeactivatedClientsExpiry == TimeSpan.Zero) + { + client.DisposeConnection(); + return; + } + + var deactivatedAt = client.DeactivatedAt ?? DateTime.UtcNow; + client.DeactivatedAt = deactivatedAt; + + if (!DeactivatedClients.TryAdd(client, deactivatedAt)) + client.DisposeConnection(); + } + + internal static void DisposeExpiredClients() + { + if (RedisConfig.DeactivatedClientsExpiry == TimeSpan.Zero || DeactivatedClients.Count == 0) + return; + + var now = DateTime.UtcNow; + var removeDisposed = new List(); + + foreach (var entry in DeactivatedClients) + { + try + { + if (now - entry.Value <= RedisConfig.DeactivatedClientsExpiry) + continue; + + if (log.IsDebugEnabled) + log.Debug("Disposed Deactivated Client: {0}".Fmt(entry.Key.GetHostString())); + + entry.Key.DisposeConnection(); + removeDisposed.Add(entry.Key); + } + catch + { + removeDisposed.Add(entry.Key); + } + } + + if (removeDisposed.Count == 0) + return; + + var dict = ((IDictionary)DeactivatedClients); + foreach (var client in removeDisposed) + { + dict.Remove(client); + } + } + + internal static void DisposeAllDeactivatedClients() + { + if (RedisConfig.DeactivatedClientsExpiry == TimeSpan.Zero) + return; + + var allClients = DeactivatedClients.Keys.ToArray(); + DeactivatedClients.Clear(); + foreach (var client in allClients) + { + if (log.IsDebugEnabled) + log.Debug("Disposed Deactivated Client (All): {0}".Fmt(client.GetHostString())); + + client.DisposeConnection(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisStats.cs b/src/ServiceStack.Redis/RedisStats.cs new file mode 100644 index 00000000..4b357f18 --- /dev/null +++ b/src/ServiceStack.Redis/RedisStats.cs @@ -0,0 +1,168 @@ +using System.Collections.Generic; +using System.Threading; + +namespace ServiceStack.Redis +{ + public static class RedisStats + { + /// + /// Total number of commands sent + /// + public static long TotalCommandsSent + { + get { return Interlocked.Read(ref 
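DeactivateClient and DisposeExpiredClients above give in-flight callers a grace period after a failover before their connections are torn down; the window comes from RedisConfig (fragment, illustrative value):

    // TimeSpan.Zero disposes the connection immediately instead of parking the
    // client in DeactivatedClients until DisposeExpiredClients() runs.
    RedisConfig.DeactivatedClientsExpiry = TimeSpan.FromSeconds(10);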
RedisState.TotalCommandsSent); } + } + + /// + /// Number of times the Redis Client Managers have FailoverTo() either by sentinel or manually + /// + public static long TotalFailovers + { + get { return Interlocked.Read(ref RedisState.TotalFailovers); } + } + + /// + /// Number of times a Client was deactivated from the pool, either by FailoverTo() or exceptions on client + /// + public static long TotalDeactivatedClients + { + get { return Interlocked.Read(ref RedisState.TotalDeactivatedClients); } + } + + /// + /// Number of times connecting to a Sentinel has failed + /// + public static long TotalFailedSentinelWorkers + { + get { return Interlocked.Read(ref RedisState.TotalFailedSentinelWorkers); } + } + + /// + /// Number of times we've forced Sentinel to failover to another master due to + /// consecutive errors beyond sentinel.WaitBeforeForcingMasterFailover + /// + public static long TotalForcedMasterFailovers + { + get { return Interlocked.Read(ref RedisState.TotalForcedMasterFailovers); } + } + + /// + /// Number of times a connecting to a reported Master wasn't actually a Master + /// + public static long TotalInvalidMasters + { + get { return Interlocked.Read(ref RedisState.TotalInvalidMasters); } + } + + /// + /// Number of times no Masters could be found in any of the configured hosts + /// + public static long TotalNoMastersFound + { + get { return Interlocked.Read(ref RedisState.TotalNoMastersFound); } + } + + /// + /// Number of Redis Client instances created with RedisConfig.ClientFactory + /// + public static long TotalClientsCreated + { + get { return Interlocked.Read(ref RedisState.TotalClientsCreated); } + } + + /// + /// Number of times a Redis Client was created outside of pool, either due to overflow or reserved slot was overridden + /// + public static long TotalClientsCreatedOutsidePool + { + get { return Interlocked.Read(ref RedisState.TotalClientsCreatedOutsidePool); } + } + + /// + /// Number of times Redis Sentinel reported a Subjective Down (sdown) + /// + public static long TotalSubjectiveServersDown + { + get { return Interlocked.Read(ref RedisState.TotalSubjectiveServersDown); } + } + + /// + /// Number of times Redis Sentinel reported an Objective Down (sdown) + /// + public static long TotalObjectiveServersDown + { + get { return Interlocked.Read(ref RedisState.TotalObjectiveServersDown); } + } + + /// + /// Number of times a Redis Request was retried due to Socket or Retryable exception + /// + public static long TotalRetryCount + { + get { return Interlocked.Read(ref RedisState.TotalRetryCount); } + } + + /// + /// Number of times a Request succeeded after it was retried + /// + public static long TotalRetrySuccess + { + get { return Interlocked.Read(ref RedisState.TotalRetrySuccess); } + } + + /// + /// Number of times a Retry Request failed after exceeding RetryTimeout + /// + public static long TotalRetryTimedout + { + get { return Interlocked.Read(ref RedisState.TotalRetryTimedout); } + } + + /// + /// Total number of deactivated clients that are pending being disposed + /// + public static long TotalPendingDeactivatedClients + { + get { return RedisState.DeactivatedClients.Count; } + } + + public static void Reset() + { + Interlocked.Exchange(ref RedisState.TotalFailovers, 0); + Interlocked.Exchange(ref RedisState.TotalDeactivatedClients, 0); + Interlocked.Exchange(ref RedisState.TotalFailedSentinelWorkers, 0); + Interlocked.Exchange(ref RedisState.TotalForcedMasterFailovers, 0); + Interlocked.Exchange(ref RedisState.TotalInvalidMasters, 0); + 
Interlocked.Exchange(ref RedisState.TotalNoMastersFound, 0); + Interlocked.Exchange(ref RedisState.TotalClientsCreated, 0); + Interlocked.Exchange(ref RedisState.TotalClientsCreatedOutsidePool, 0); + Interlocked.Exchange(ref RedisState.TotalSubjectiveServersDown, 0); + Interlocked.Exchange(ref RedisState.TotalObjectiveServersDown, 0); + Interlocked.Exchange(ref RedisState.TotalRetryCount, 0); + Interlocked.Exchange(ref RedisState.TotalRetrySuccess, 0); + Interlocked.Exchange(ref RedisState.TotalRetryTimedout, 0); + } + + public static Dictionary ToDictionary() + { + return new Dictionary + { + {"TotalCommandsSent", TotalCommandsSent}, + {"TotalFailovers", TotalFailovers}, + {"TotalDeactivatedClients", TotalDeactivatedClients}, + {"TotalFailedSentinelWorkers", TotalFailedSentinelWorkers}, + {"TotalForcedMasterFailovers", TotalForcedMasterFailovers}, + {"TotalInvalidMasters", TotalInvalidMasters}, + {"TotalNoMastersFound", TotalNoMastersFound}, + {"TotalClientsCreated", TotalClientsCreated}, + {"TotalClientsCreatedOutsidePool", TotalClientsCreatedOutsidePool}, + {"TotalSubjectiveServersDown", TotalSubjectiveServersDown}, + {"TotalObjectiveServersDown", TotalObjectiveServersDown}, + {"TotalPendingDeactivatedClients", TotalPendingDeactivatedClients }, + {"TotalRetryCount", TotalRetryCount }, + {"TotalRetrySuccess", TotalRetrySuccess }, + {"TotalRetryTimedout", TotalRetryTimedout }, + }; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisSubscription.Async.cs b/src/ServiceStack.Redis/RedisSubscription.Async.cs new file mode 100644 index 00000000..95a861f9 --- /dev/null +++ b/src/ServiceStack.Redis/RedisSubscription.Async.cs @@ -0,0 +1,184 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisSubscription + : IRedisSubscriptionAsync + { + // private events here for +/- semantics + private event Func OnSubscribeAsync; + private event Func OnMessageAsync; + private event Func OnMessageBytesAsync; + private event Func OnUnSubscribeAsync; + + event Func IRedisSubscriptionAsync.OnSubscribeAsync + { + add => OnSubscribeAsync += value; + remove => OnSubscribeAsync -= value; + } + event Func IRedisSubscriptionAsync.OnMessageAsync + { + add => OnMessageAsync += value; + remove => OnMessageAsync -= value; + } + event Func IRedisSubscriptionAsync.OnMessageBytesAsync + { + add => OnMessageBytesAsync += value; + remove => OnMessageBytesAsync -= value; + } + event Func IRedisSubscriptionAsync.OnUnSubscribeAsync + { + add => OnUnSubscribeAsync += value; + remove => OnUnSubscribeAsync -= value; + } + + private IRedisSubscriptionAsync AsAsync() => this; + private IRedisNativeClientAsync NativeAsync + { + get + { + return redisClient as IRedisNativeClientAsync ?? NotAsync(); + static IRedisNativeClientAsync NotAsync() => throw new InvalidOperationException("The underlying client is not async"); + } + } + + private async ValueTask UnSubscribeFromAllChannelsMatchingAnyPatternsAsync(CancellationToken token = default) + { + if (activeChannels.Count == 0) return; + + var multiBytes = await NativeAsync.PUnSubscribeAsync(Array.Empty(), token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + + this.activeChannels = new List(); + } + + ValueTask IAsyncDisposable.DisposeAsync() => IsPSubscription + ? 
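The counters above are cumulative for the lifetime of the process; a small monitoring sketch. Note that Reset() as written zeroes the failure and retry counters but not TotalCommandsSent:

    using System;
    using ServiceStack.Redis;

    class RedisStatsDump
    {
        static void Main()
        {
            foreach (var entry in RedisStats.ToDictionary())
                Console.WriteLine($"{entry.Key,-35} {entry.Value}");

            RedisStats.Reset(); // clears the failure/retry counters listed in Reset() above
        }
    }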
UnSubscribeFromAllChannelsMatchingAnyPatternsAsync() + : AsAsync().UnSubscribeFromAllChannelsAsync(); + + async ValueTask IRedisSubscriptionAsync.SubscribeToChannelsAsync(string[] channels, CancellationToken token) + { + var multiBytes = await NativeAsync.SubscribeAsync(channels, token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + + while (this.SubscriptionCount > 0) + { + multiBytes = await NativeAsync.ReceiveMessagesAsync(token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + } + } + + async ValueTask IRedisSubscriptionAsync.SubscribeToChannelsMatchingAsync(string[] patterns, CancellationToken token) + { + var multiBytes = await NativeAsync.PSubscribeAsync(patterns, token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + + while (this.SubscriptionCount > 0) + { + multiBytes = await NativeAsync.ReceiveMessagesAsync(token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + } + } + + async ValueTask IRedisSubscriptionAsync.UnSubscribeFromAllChannelsAsync(CancellationToken token) + { + if (activeChannels.Count == 0) return; + + var multiBytes = await NativeAsync.UnSubscribeAsync(Array.Empty(), token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + + this.activeChannels = new List(); + } + + async ValueTask IRedisSubscriptionAsync.UnSubscribeFromChannelsAsync(string[] channels, CancellationToken token) + { + var multiBytes = await NativeAsync.UnSubscribeAsync(channels, token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + } + + async ValueTask IRedisSubscriptionAsync.UnSubscribeFromChannelsMatchingAsync(string[] patterns, CancellationToken token) + { + var multiBytes = await NativeAsync.PUnSubscribeAsync(patterns, token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + } + + private async ValueTask ParseSubscriptionResultsAsync(byte[][] multiBytes) + { + int componentsPerMsg = IsPSubscription ? 
4 : 3; + for (var i = 0; i < multiBytes.Length; i += componentsPerMsg) + { + var messageType = multiBytes[i]; + var channel = multiBytes[i + 1].FromUtf8Bytes(); + if (SubscribeWord.AreEqual(messageType) + || PSubscribeWord.AreEqual(messageType)) + { + IsPSubscription = PSubscribeWord.AreEqual(messageType); + + this.SubscriptionCount = int.Parse(multiBytes[i + MsgIndex].FromUtf8Bytes()); + + activeChannels.Add(channel); + + var tmp = OnSubscribeAsync; + if (tmp is object) await tmp.Invoke(channel).ConfigureAwait(false); + } + else if (UnSubscribeWord.AreEqual(messageType) + || PUnSubscribeWord.AreEqual(messageType)) + { + this.SubscriptionCount = int.Parse(multiBytes[i + 2].FromUtf8Bytes()); + + activeChannels.Remove(channel); + + var tmp = OnUnSubscribeAsync; + if (tmp is object) await tmp.Invoke(channel).ConfigureAwait(false); + } + else if (MessageWord.AreEqual(messageType)) + { + var msgBytes = multiBytes[i + MsgIndex]; + var tmp1 = OnMessageBytesAsync; + if (tmp1 is object) await tmp1.Invoke(channel, msgBytes).ConfigureAwait(false); + + var tmp2 = OnMessageAsync; + if (tmp2 is object) + { + var message = msgBytes.FromUtf8Bytes(); + await tmp2.Invoke(channel, message).ConfigureAwait(false); + } + } + else if (PMessageWord.AreEqual(messageType)) + { + channel = multiBytes[i + 2].FromUtf8Bytes(); + var msgBytes = multiBytes[i + MsgIndex + 1]; + var tmp1 = OnMessageBytesAsync; + if (tmp1 is object) await tmp1.Invoke(channel, msgBytes).ConfigureAwait(false); + + var tmp2 = OnMessageAsync; + if (tmp2 is object) + { + var message = msgBytes.FromUtf8Bytes(); + await tmp2.Invoke(channel, message).ConfigureAwait(false); + } + } + else + { + throw new RedisException( + "Invalid state. Expected [[p]subscribe|[p]unsubscribe|message] got: " + messageType.FromUtf8Bytes()); + } + } + } + + ValueTask IRedisSubscriptionAsync.SubscribeToChannelsAsync(params string[] channels) + => AsAsync().SubscribeToChannelsAsync(channels, token: default); + + ValueTask IRedisSubscriptionAsync.SubscribeToChannelsMatchingAsync(params string[] patterns) + => AsAsync().SubscribeToChannelsMatchingAsync(patterns, token: default); + + ValueTask IRedisSubscriptionAsync.UnSubscribeFromChannelsAsync(params string[] channels) + => AsAsync().UnSubscribeFromChannelsAsync(channels, token: default); + + ValueTask IRedisSubscriptionAsync.UnSubscribeFromChannelsMatchingAsync(params string[] patterns) + => AsAsync().UnSubscribeFromChannelsMatchingAsync(patterns, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisSubscription.cs b/src/ServiceStack.Redis/RedisSubscription.cs index de494b7d..298b1bf4 100644 --- a/src/ServiceStack.Redis/RedisSubscription.cs +++ b/src/ServiceStack.Redis/RedisSubscription.cs @@ -1,16 +1,15 @@ using System; using System.Collections.Generic; -using ServiceStack.Common.Extensions; using ServiceStack.Text; namespace ServiceStack.Redis { - public class RedisSubscription + public partial class RedisSubscription : IRedisSubscription { private readonly IRedisNativeClient redisClient; private List activeChannels; - public int SubscriptionCount { get; private set; } + public long SubscriptionCount { get; private set; } public bool IsPSubscription { get; private set; } private const int MsgIndex = 2; @@ -31,6 +30,7 @@ public RedisSubscription(IRedisNativeClient redisClient) public Action OnSubscribe { get; set; } public Action OnMessage { get; set; } + public Action OnMessageBytes { get; set; } public Action OnUnSubscribe { get; set; } public void SubscribeToChannels(params 
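OnMessageBytes above hands the raw payload to the callback without forcing a UTF-8 round-trip, alongside the existing string-based OnMessage. A hedged usage sketch; whether the new callback is exposed on the IRedisSubscription interface or only on the concrete RedisSubscription is an assumption here:

    using System;
    using ServiceStack.Redis;

    class SubscriberSketch
    {
        static void Main()
        {
            using var redis = new RedisClient("localhost:6379");
            using var subscription = redis.CreateSubscription();

            subscription.OnMessage = (channel, msg) =>
                Console.WriteLine($"{channel}: {msg}");

            // Raw-bytes variant; the cast reflects the assumption above
            if (subscription is RedisSubscription concrete)
                concrete.OnMessageBytes = (channel, bytes) =>
                    Console.WriteLine($"{channel}: {bytes.Length} bytes");

            subscription.SubscribeToChannels("news"); // blocks while SubscriptionCount > 0
        }
    }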
string[] channels) @@ -73,10 +73,7 @@ private void ParseSubscriptionResults(byte[][] multiBytes) activeChannels.Add(channel); - if (this.OnSubscribe != null) - { - this.OnSubscribe(channel); - } + this.OnSubscribe?.Invoke(channel); } else if (UnSubscribeWord.AreEqual(messageType) || PUnSubscribeWord.AreEqual(messageType)) @@ -85,28 +82,24 @@ private void ParseSubscriptionResults(byte[][] multiBytes) activeChannels.Remove(channel); - if (this.OnUnSubscribe != null) - { - this.OnUnSubscribe(channel); - } + this.OnUnSubscribe?.Invoke(channel); } else if (MessageWord.AreEqual(messageType)) { - var message = multiBytes[i + MsgIndex].FromUtf8Bytes(); + var msgBytes = multiBytes[i + MsgIndex]; + this.OnMessageBytes?.Invoke(channel, msgBytes); - if (this.OnMessage != null) - { - this.OnMessage(channel, message); - } + var message = msgBytes.FromUtf8Bytes(); + this.OnMessage?.Invoke(channel, message); } else if (PMessageWord.AreEqual(messageType)) { - var message = multiBytes[i + MsgIndex + 1].FromUtf8Bytes(); channel = multiBytes[i + 2].FromUtf8Bytes(); - if (this.OnMessage != null) - { - this.OnMessage(channel, message); - } + var msgBytes = multiBytes[i + MsgIndex + 1]; + this.OnMessageBytes?.Invoke(channel, msgBytes); + + var message = msgBytes.FromUtf8Bytes(); + this.OnMessage?.Invoke(channel, message); } else { diff --git a/src/ServiceStack.Redis/ScanResult.cs b/src/ServiceStack.Redis/ScanResult.cs new file mode 100644 index 00000000..8a2e3248 --- /dev/null +++ b/src/ServiceStack.Redis/ScanResult.cs @@ -0,0 +1,39 @@ +using System.Collections.Generic; +using System.Globalization; + +namespace ServiceStack.Redis +{ + public static class ScanResultExtensions + { + public static List AsStrings(this ScanResult result) + { + return result.Results.Map(x => x.FromUtf8Bytes()); + } + + public static Dictionary AsItemsWithScores(this ScanResult result) + { + var to = new Dictionary(); + for (var i = 0; i < result.Results.Count; i += 2) + { + var key = result.Results[i]; + var score = double.Parse(result.Results[i + 1].FromUtf8Bytes(), + NumberStyles.Float, + CultureInfo.InvariantCulture); + to[key.FromUtf8Bytes()] = score; + } + return to; + } + + public static Dictionary AsKeyValues(this ScanResult result) + { + var to = new Dictionary(); + for (var i = 0; i < result.Results.Count; i += 2) + { + var key = result.Results[i]; + var value = result.Results[i + 1]; + to[key.FromUtf8Bytes()] = value.FromUtf8Bytes(); + } + return to; + } + } +} diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.Core.csproj b/src/ServiceStack.Redis/ServiceStack.Redis.Core.csproj new file mode 100644 index 00000000..a2fe7181 --- /dev/null +++ b/src/ServiceStack.Redis/ServiceStack.Redis.Core.csproj @@ -0,0 +1,35 @@ + + + ServiceStack.Redis.Core + ServiceStack.Redis + ServiceStack.Redis + netstandard2.0;net6.0 + ServiceStack.Redis .NET Standard 2.0 + + .NET Standard 2.0 version of ServiceStack.Redis + + Redis;NoSQL;Client;Distributed;Cache;PubSub;Messaging;Transactions + + + $(DefineConstants);ASYNC_MEMORY + + + $(DefineConstants);ASYNC_MEMORY;NET6_0 + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.Source.csproj b/src/ServiceStack.Redis/ServiceStack.Redis.Source.csproj new file mode 100644 index 00000000..3efa57ed --- /dev/null +++ b/src/ServiceStack.Redis/ServiceStack.Redis.Source.csproj @@ -0,0 +1,44 @@ + + + ServiceStack.Redis + ServiceStack.Redis + netstandard2.0;net6.0 + C# Redis client for the Redis NoSQL DB + + C# Redis Client for the worlds fastest 
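The ScanResult helpers above pair up the raw byte[] replies from SCAN-family commands; a hedged sketch assuming the native Scan/ZScan methods on RedisClient take (cursor, count, match) arguments and return the ScanResult these extensions consume:

    using System;
    using ServiceStack.Redis;

    class ScanSketch
    {
        static void Main()
        {
            using var redis = new RedisClient("localhost:6379");

            var keyScan = redis.Scan(0, 100, "user:*");          // assumed signature: cursor, count, match
            foreach (var key in keyScan.AsStrings())
                Console.WriteLine(key);

            var zsetScan = redis.ZScan("leaderboard", 0, 100);   // assumed signature: key, cursor, count
            foreach (var pair in zsetScan.AsItemsWithScores())   // member -> score
                Console.WriteLine($"{pair.Key}: {pair.Value}");
        }
    }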
distributed NoSQL datastore. + Byte[], String and POCO Typed clients. + Thread-Safe Basic and Pooled client managers included. + + Redis;NoSQL;Client;Distributed;Cache;PubSub;Messaging;Transactions + false + + + + $(DefineConstants);ASYNC_MEMORY + + + $(DefineConstants);ASYNC_MEMORY;NET6_0 + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.csproj b/src/ServiceStack.Redis/ServiceStack.Redis.csproj index 87d1f474..4e6274a9 100644 --- a/src/ServiceStack.Redis/ServiceStack.Redis.csproj +++ b/src/ServiceStack.Redis/ServiceStack.Redis.csproj @@ -1,275 +1,47 @@ - - + - Debug - AnyCPU - 9.0.30729 - 2.0 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540} - Library - Properties - ServiceStack.Redis + ServiceStack.Redis ServiceStack.Redis - 512 - - - 3.5 - - publish\ - true - Disk - false - Foreground - 7 - Days - false - false - true - 0 - 1.0.0.%2a - false - false - true - v3.5 + net472;netstandard2.0;netstandard2.1;net6.0 + C# Redis client for the Redis NoSQL DB + + C# Redis Client for the worlds fastest distributed NoSQL datastore. + Byte[], String and POCO Typed clients. + Thread-Safe Basic and Pooled client managers included. + + Redis;NoSQL;Client;Distributed;Cache;PubSub;Messaging;Transactions - - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - AllRules.ruleset + + + $(DefineConstants);NET472 - - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - AllRules.ruleset - bin\Release\ServiceStack.Redis.XML + + $(DefineConstants);NETCORE - - true - bin\STATIC_ONLY NO_EXPRESSIONS\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset + + $(DefineConstants);ASYNC_MEMORY;NETCORE - - true - bin\MonoTouch\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset - - - true - bin\x86\Debug\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - false - - - TRACE - bin\Release\ServiceStack.Redis.XML - true - pdbonly - x86 - prompt - AllRules.ruleset - false - false - bin\x86\Release\ - - - true - bin\x86\STATIC_ONLY NO_EXPRESSIONS\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - false - false - - - true - bin\x86\MonoTouch\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - false - false - false + + $(DefineConstants);ASYNC_MEMORY;NETCORE;NET6_0 - - ..\..\lib\ServiceStack.Common.dll - - - ..\..\lib\ServiceStack.Interfaces.dll - - - ..\..\lib\ServiceStack.Text.dll - - - - 3.5 - - - 3.5 - - - 3.5 - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Code - - - - - - - - - - - - + + + - - - False - .NET Framework 3.5 SP1 Client Profile - false - - - False - .NET Framework 3.5 SP1 - true - - - False - Windows Installer 3.1 - true - + + + + + + + + + + + - - + \ No newline at end of file diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.csproj.user b/src/ServiceStack.Redis/ServiceStack.Redis.csproj.user deleted file mode 100644 index daa64de0..00000000 --- a/src/ServiceStack.Redis/ServiceStack.Redis.csproj.user +++ /dev/null @@ -1,14 +0,0 @@ - - - - ProjectFiles - - - - - - - en-US - false - - \ No newline at end of file diff --git a/src/ServiceStack.Redis/ShardedConnectionPool.cs b/src/ServiceStack.Redis/ShardedConnectionPool.cs index 61b238bd..bf6a1526 100644 --- a/src/ServiceStack.Redis/ShardedConnectionPool.cs +++ b/src/ServiceStack.Redis/ShardedConnectionPool.cs @@ -26,7 +26,7 @@ public class ShardedConnectionPool : 
PooledRedisClientManager public ShardedConnectionPool(string name, int weight, params string[] readWriteHosts) : base(readWriteHosts) { - this.PoolTimeOut = 1000; + this.PoolTimeout = 1000; this.name = name; this.weight = weight; } diff --git a/src/ServiceStack.Redis/Support/Diagnostic/InvokeEventArgs.cs b/src/ServiceStack.Redis/Support/Diagnostic/InvokeEventArgs.cs new file mode 100644 index 00000000..a4d55c42 --- /dev/null +++ b/src/ServiceStack.Redis/Support/Diagnostic/InvokeEventArgs.cs @@ -0,0 +1,18 @@ +using System; +using System.Reflection; + +namespace ServiceStack.Redis.Support.Diagnostic +{ + /// + /// Provides access to the method reflection data as part of the before/after event + /// + public class InvokeEventArgs : EventArgs + { + public MethodInfo MethodInfo { get; private set; } + + public InvokeEventArgs(MethodInfo methodInfo) + { + MethodInfo = methodInfo; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Diagnostic/TrackingFrame.cs b/src/ServiceStack.Redis/Support/Diagnostic/TrackingFrame.cs new file mode 100644 index 00000000..dfdda14d --- /dev/null +++ b/src/ServiceStack.Redis/Support/Diagnostic/TrackingFrame.cs @@ -0,0 +1,36 @@ +using System; + +namespace ServiceStack.Redis.Support.Diagnostic +{ + /// + /// Stores details about the context in which an IRedisClient is allocated. + /// + public class TrackingFrame : IEquatable + { + public Guid Id { get; set; } + + public Type ProvidedToInstanceOfType { get; set; } + + public DateTime Initialised { get; set; } + + public bool Equals(TrackingFrame other) + { + if (ReferenceEquals(null, other)) return false; + if (ReferenceEquals(this, other)) return true; + return Id.Equals(other.Id); + } + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != this.GetType()) return false; + return Equals((TrackingFrame)obj); + } + + public override int GetHashCode() + { + return Id.GetHashCode(); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientProxy.cs b/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientProxy.cs new file mode 100644 index 00000000..7ee72602 --- /dev/null +++ b/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientProxy.cs @@ -0,0 +1,58 @@ +#if !(NETSTANDARD2_0 || NETSTANDARD2_1 || NET6_0) +using System; +using System.Reflection; +using System.Runtime.Remoting.Messaging; +using ServiceStack.Logging; + +namespace ServiceStack.Redis.Support.Diagnostic +{ + /// + /// Dynamically proxies access to the IRedisClient providing events for before & after each method invocation + /// + public class TrackingRedisClientProxy : System.Runtime.Remoting.Proxies.RealProxy + { + private static readonly ILog Logger = LogManager.GetLogger(typeof(TrackingRedisClientProxy)); + + private readonly IRedisClient redisClientInstance; + private readonly Guid id; + + public TrackingRedisClientProxy(IRedisClient instance, Guid id) + : base(typeof(IRedisClient)) + { + this.redisClientInstance = instance; + this.id = id; + } + + public event EventHandler BeforeInvoke; + public event EventHandler AfterInvoke; + + public override IMessage Invoke(IMessage msg) + { + // Thanks: http://stackoverflow.com/a/15734124/211978 + + var methodCall = (IMethodCallMessage)msg; + var method = (MethodInfo)methodCall.MethodBase; + + try + { + if (this.BeforeInvoke != null) + { + this.BeforeInvoke(this, new InvokeEventArgs(method)); + } + var result 
= method.Invoke(this.redisClientInstance, methodCall.InArgs); + if (this.AfterInvoke != null) + { + this.AfterInvoke(this, new InvokeEventArgs(method)); + } + + return new ReturnMessage(result, null, 0, methodCall.LogicalCallContext, methodCall); + } + catch (TargetInvocationException e) + { + Logger.Error("Reflection exception when invoking target method", e); + return new ReturnMessage(e.InnerException, msg as IMethodCallMessage); + } + } + } +} +#endif \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientsManager.cs b/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientsManager.cs new file mode 100644 index 00000000..ab8216bd --- /dev/null +++ b/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientsManager.cs @@ -0,0 +1,133 @@ +#if !(NETSTANDARD2_0 || NETSTANDARD2_1 || NET6_0) +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Threading; +using ServiceStack.Caching; +using ServiceStack.Logging; + +namespace ServiceStack.Redis.Support.Diagnostic +{ + /// + /// Tracks each IRedisClient instance allocated from the IRedisClientsManager logging when they are allocated and disposed. + /// Periodically writes the allocated instances to the log for diagnostic purposes. + /// + public partial class TrackingRedisClientsManager : IRedisClientsManager + { + private static readonly ILog Logger = LogManager.GetLogger(typeof(TrackingRedisClientsManager)); + + private readonly HashSet trackingFrames = new HashSet(); + private readonly IRedisClientsManager redisClientsManager; + + public TrackingRedisClientsManager(IRedisClientsManager redisClientsManager) + { + this.redisClientsManager = redisClientsManager ?? 
throw new ArgumentNullException(nameof(redisClientsManager)); + Logger.DebugFormat("Constructed"); + + var timer = new Timer(state => this.DumpState()); + timer.Change(TimeSpan.FromSeconds(30), TimeSpan.FromMinutes(1)); + } + + public void Dispose() + { + Logger.DebugFormat("Disposed"); + this.redisClientsManager.Dispose(); + } + + [MethodImpl(MethodImplOptions.NoInlining)] + public IRedisClient GetClient() + { + // get calling instance + var callingStackFrame = new StackFrame(1, true); + var callingMethodType = callingStackFrame.GetMethod(); + + return TrackInstance(callingMethodType, "GetClient", this.redisClientsManager.GetClient()); + } + + [MethodImpl(MethodImplOptions.NoInlining)] + public IRedisClient GetReadOnlyClient() + { + // get calling instance + var callingMethodType = new StackFrame(1, true).GetMethod(); + + return TrackInstance(callingMethodType, "GetReadOnlyClient", this.redisClientsManager.GetReadOnlyClient()); + } + + public ICacheClient GetCacheClient() + { + Logger.DebugFormat("GetCacheClient"); + return this.redisClientsManager.GetCacheClient(); + } + + public ICacheClient GetReadOnlyCacheClient() + { + Logger.DebugFormat("GetReadOnlyCacheClient"); + return this.redisClientsManager.GetReadOnlyCacheClient(); + } + + private IRedisClient TrackInstance(MethodBase callingMethodType, string method, IRedisClient instance) + { + // track + var frame = new TrackingFrame() + { + Id = Guid.NewGuid(), + Initialised = DateTime.Now, + ProvidedToInstanceOfType = callingMethodType.DeclaringType, + }; + lock (this.trackingFrames) + { + this.trackingFrames.Add(frame); + } + + // proxy + var proxy = new TrackingRedisClientProxy(instance, frame.Id); + proxy.BeforeInvoke += (sender, args) => + { + if (string.Compare("Dispose", args.MethodInfo.Name, StringComparison.InvariantCultureIgnoreCase) != 0) + { + return; + } + lock (this.trackingFrames) + { + this.trackingFrames.Remove(frame); + } + var duration = DateTime.Now - frame.Initialised; + + Logger.DebugFormat("{0,18} Disposed {1} released from instance of type {2} checked out for {3}", method, frame.Id, frame.ProvidedToInstanceOfType.FullName, duration); + }; + + Logger.DebugFormat("{0,18} Tracking {1} allocated to instance of type {2}", method, frame.Id, frame.ProvidedToInstanceOfType.FullName); + return proxy.GetTransparentProxy() as IRedisClient; + } + + private void DumpState() + { + Logger.InfoFormat("Dumping currently checked out IRedisClient instances"); + var inUseInstances = new Func(() => + { + lock (this.trackingFrames) + { + return Enumerable.ToArray(this.trackingFrames); + } + }).Invoke(); + + var summaryByType = inUseInstances.GroupBy(x => x.ProvidedToInstanceOfType.FullName); + foreach (var grouped in summaryByType) + { + Logger.InfoFormat("{0,60}: {1,-9} oldest {2}", grouped.Key, grouped.Count(), + grouped.Min(x => x.Initialised)); + } + + foreach (var trackingFrame in inUseInstances) + { + Logger.DebugFormat("Instance {0} allocated to {1} at {2} ({3})", trackingFrame.Id, + trackingFrame.ProvidedToInstanceOfType.FullName, trackingFrame.Initialised, + trackingFrame.ProvidedToInstanceOfType.FullName); + } + } + } +} +#endif \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/DisposableDistributedLock.cs b/src/ServiceStack.Redis/Support/Locking/DisposableDistributedLock.cs index 44c6d7d4..96c891b1 100644 --- a/src/ServiceStack.Redis/Support/Locking/DisposableDistributedLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/DisposableDistributedLock.cs @@ -8,7 +8,6 @@ namespace 
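Illustrative sketch (not part of the patch): TrackingRedisClientsManager above is a plain decorator over an existing IRedisClientsManager; the host string and key below are placeholders, and the #if guard keeps these diagnostic types out of the netstandard2.0/net6.0 builds.

using ServiceStack.Redis;
using ServiceStack.Redis.Support.Diagnostic;

// "localhost:6379" and the key are placeholder values for this sketch.
// Every GetClient()/GetReadOnlyClient() is logged with the calling type, and clients
// that are never disposed show up in the periodic DumpState() output.
IRedisClientsManager manager =
    new TrackingRedisClientsManager(new PooledRedisClientManager("localhost:6379"));

using (var redis = manager.GetClient())
{
    redis.SetValue("greeting", "hello");   // proxied call; Dispose() releases the tracking frame
}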
ServiceStack.Redis.Support.Locking public class DisposableDistributedLock : IDisposable { private readonly IDistributedLock myLock; - private readonly long lockState; private readonly long lockExpire; private readonly IRedisClient myClient; private readonly string globalLockKey; @@ -25,20 +24,12 @@ public DisposableDistributedLock(IRedisClient client, string globalLockKey, int myLock = new DistributedLock(); myClient = client; this.globalLockKey = globalLockKey; - lockState = myLock.Lock(globalLockKey, acquisitionTimeout, lockTimeout, out lockExpire, myClient); + LockState = myLock.Lock(globalLockKey, acquisitionTimeout, lockTimeout, out lockExpire, myClient); } + public long LockState { get; } - public long LockState - { - get { return lockState; } - } - - public long LockExpire - { - get { return lockExpire; } - } - + public long LockExpire => lockExpire; /// /// unlock diff --git a/src/ServiceStack.Redis/Support/Locking/DistributedLock.Async.cs b/src/ServiceStack.Redis/Support/Locking/DistributedLock.Async.cs new file mode 100644 index 00000000..4f79a42b --- /dev/null +++ b/src/ServiceStack.Redis/Support/Locking/DistributedLock.Async.cs @@ -0,0 +1,121 @@ +using System; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Support.Locking +{ + partial class DistributedLock : IDistributedLockAsync + { + public IDistributedLockAsync AsAsync() => this; + + async ValueTask IDistributedLockAsync.LockAsync(string key, int acquisitionTimeout, int lockTimeout, IRedisClientAsync client, CancellationToken token) + { + long lockExpire = 0; + + // cannot lock on a null key + if (key == null) + return new LockState(LOCK_NOT_ACQUIRED, lockExpire); + + const int sleepIfLockSet = 200; + acquisitionTimeout *= 1000; //convert to ms + int tryCount = (acquisitionTimeout / sleepIfLockSet) + 1; + + var ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + var newLockExpire = CalculateLockExpire(ts, lockTimeout); + + var nativeClient = (IRedisNativeClientAsync)client; + long wasSet = await nativeClient.SetNXAsync(key, BitConverter.GetBytes(newLockExpire), token).ConfigureAwait(false); + int totalTime = 0; + while (wasSet == LOCK_NOT_ACQUIRED && totalTime < acquisitionTimeout) + { + int count = 0; + while (wasSet == 0 && count < tryCount && totalTime < acquisitionTimeout) + { + await Task.Delay(sleepIfLockSet).ConfigureAwait(false); + totalTime += sleepIfLockSet; + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + newLockExpire = CalculateLockExpire(ts, lockTimeout); + wasSet = await nativeClient.SetNXAsync(key, BitConverter.GetBytes(newLockExpire), token).ConfigureAwait(false); + count++; + } + // acquired lock! + if (wasSet != LOCK_NOT_ACQUIRED) break; + + // handle possibliity of crashed client still holding the lock + var pipe = client.CreatePipeline(); + await using (pipe.ConfigureAwait(false)) + { + long lockValue = 0; + pipe.QueueCommand(r => ((IRedisNativeClientAsync)r).WatchAsync(new[] { key }, token)); + pipe.QueueCommand(r => ((IRedisNativeClientAsync)r).GetAsync(key, token), x => lockValue = (x != null) ? 
BitConverter.ToInt64(x, 0) : 0); + await pipe.FlushAsync(token).ConfigureAwait(false); + + // if lock value is 0 (key is empty), or expired, then we can try to acquire it + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + if (lockValue < ts.TotalSeconds) + { + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + newLockExpire = CalculateLockExpire(ts, lockTimeout); + var trans = await client.CreateTransactionAsync(token).ConfigureAwait(false); + await using (trans.ConfigureAwait(false)) + { + var expire = newLockExpire; + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).SetAsync(key, BitConverter.GetBytes(expire), token: token)); + if (await trans.CommitAsync(token).ConfigureAwait(false)) + wasSet = LOCK_RECOVERED; //recovered lock! + } + } + else + { + await nativeClient.UnWatchAsync(token).ConfigureAwait(false); + } + } + if (wasSet != LOCK_NOT_ACQUIRED) break; + await Task.Delay(sleepIfLockSet).ConfigureAwait(false); + totalTime += sleepIfLockSet; + } + if (wasSet != LOCK_NOT_ACQUIRED) + { + lockExpire = newLockExpire; + } + return new LockState(wasSet, lockExpire); + } + + async ValueTask IDistributedLockAsync.UnlockAsync(string key, long lockExpire, IRedisClientAsync client, CancellationToken token) + { + if (lockExpire <= 0) + return false; + long lockVal = 0; + var nativeClient = (IRedisNativeClientAsync)client; + var pipe = client.CreatePipeline(); + await using (pipe.ConfigureAwait(false)) + { + pipe.QueueCommand(r => ((IRedisNativeClientAsync)r).WatchAsync(new[] { key }, token)); + pipe.QueueCommand(r => ((IRedisNativeClientAsync)r).GetAsync(key, token), + x => lockVal = (x != null) ? BitConverter.ToInt64(x, 0) : 0); + await pipe.FlushAsync(token).ConfigureAwait(false); + } + + if (lockVal != lockExpire) + { + if (lockVal != 0) + Debug.WriteLine($"Unlock(): Failed to unlock key {key}; lock has been acquired by another client "); + else + Debug.WriteLine($"Unlock(): Failed to unlock key {key}; lock has been identifed as a zombie and harvested "); + await nativeClient.UnWatchAsync(token).ConfigureAwait(false); + return false; + } + + var trans = await client.CreateTransactionAsync(token).ConfigureAwait(false); + await using (trans.ConfigureAwait(false)) + { + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).DelAsync(key, token)); + var rc = await trans.CommitAsync(token).ConfigureAwait(false); + if (!rc) + Debug.WriteLine($"Unlock(): Failed to delete key {key}; lock has been acquired by another client "); + return rc; + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/DistributedLock.cs b/src/ServiceStack.Redis/Support/Locking/DistributedLock.cs index 5bca7d51..84d1be68 100644 --- a/src/ServiceStack.Redis/Support/Locking/DistributedLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/DistributedLock.cs @@ -3,132 +3,128 @@ namespace ServiceStack.Redis.Support.Locking { - public class DistributedLock : IDistributedLock - { + public partial class DistributedLock : IDistributedLock + { public const int LOCK_NOT_ACQUIRED = 0; public const int LOCK_ACQUIRED = 1; public const int LOCK_RECOVERED = 2; - /// - /// acquire distributed, non-reentrant lock on key - /// - /// global key for this lock - /// timeout for acquiring lock - /// timeout for lock, in seconds (stored as value against lock key) + /// + /// acquire distributed, non-reentrant lock on key + /// + /// global key for this lock + /// timeout for acquiring lock + /// timeout for lock, in seconds (stored as value against lock key) /// /// public virtual 
long Lock(string key, int acquisitionTimeout, int lockTimeout, out long lockExpire, IRedisClient client) - { - lockExpire = 0; + { + lockExpire = 0; // cannot lock on a null key if (key == null) return LOCK_NOT_ACQUIRED; - const int sleepIfLockSet = 200; - acquisitionTimeout *= 1000; //convert to ms - int tryCount = (acquisitionTimeout / sleepIfLockSet) + 1; + const int sleepIfLockSet = 200; + acquisitionTimeout *= 1000; //convert to ms + int tryCount = (acquisitionTimeout / sleepIfLockSet) + 1; - var ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); - var newLockExpire = CalculateLockExpire(ts, lockTimeout); + var ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + var newLockExpire = CalculateLockExpire(ts, lockTimeout); var localClient = (RedisClient)client; - int wasSet = localClient.SetNX(key, BitConverter.GetBytes(newLockExpire)); - int totalTime = 0; + long wasSet = localClient.SetNX(key, BitConverter.GetBytes(newLockExpire)); + int totalTime = 0; while (wasSet == LOCK_NOT_ACQUIRED && totalTime < acquisitionTimeout) - { - int count = 0; - while (wasSet == 0 && count < tryCount && totalTime < acquisitionTimeout) - { - System.Threading.Thread.Sleep(sleepIfLockSet); - totalTime += sleepIfLockSet; - ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); - newLockExpire = CalculateLockExpire(ts, lockTimeout); + { + int count = 0; + while (wasSet == 0 && count < tryCount && totalTime < acquisitionTimeout) + { + TaskUtils.Sleep(sleepIfLockSet); + totalTime += sleepIfLockSet; + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + newLockExpire = CalculateLockExpire(ts, lockTimeout); wasSet = localClient.SetNX(key, BitConverter.GetBytes(newLockExpire)); - count++; - } - // acquired lock! + count++; + } + // acquired lock! if (wasSet != LOCK_NOT_ACQUIRED) break; - // handle possibliity of crashed client still holding the lock + // handle possibliity of crashed client still holding the lock using (var pipe = localClient.CreatePipeline()) - { - long lockValue=0; - pipe.QueueCommand(r => ((RedisNativeClient)r).Watch(key)); - pipe.QueueCommand(r => ((RedisNativeClient)r).Get(key), x => lockValue = (x != null) ? BitConverter.ToInt64(x,0) : 0); - pipe.Flush(); + { + long lockValue = 0; + pipe.QueueCommand(r => ((RedisNativeClient)r).Watch(key)); + pipe.QueueCommand(r => ((RedisNativeClient)r).Get(key), x => lockValue = (x != null) ? BitConverter.ToInt64(x, 0) : 0); + pipe.Flush(); - // if lock value is 0 (key is empty), or expired, then we can try to acquire it + // if lock value is 0 (key is empty), or expired, then we can try to acquire it ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); - if (lockValue < ts.TotalSeconds) - { - ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); - newLockExpire = CalculateLockExpire(ts, lockTimeout); - using (var trans = localClient.CreateTransaction()) - { - var expire = newLockExpire; - trans.QueueCommand(r => ((RedisNativeClient)r).Set(key, BitConverter.GetBytes(expire))); - if (trans.Commit()) - wasSet = LOCK_RECOVERED; //recovered lock! - } - } - else - { + if (lockValue < ts.TotalSeconds) + { + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + newLockExpire = CalculateLockExpire(ts, lockTimeout); + using (var trans = localClient.CreateTransaction()) + { + var expire = newLockExpire; + trans.QueueCommand(r => ((RedisNativeClient)r).Set(key, BitConverter.GetBytes(expire))); + if (trans.Commit()) + wasSet = LOCK_RECOVERED; //recovered lock! 
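Illustrative sketch (not part of the patch) of driving the synchronous Lock/Unlock pair above; 'redis' stands in for an IRedisClient instance, and the key and timeouts are placeholders.

var dlock = new DistributedLock();
long lockExpire;

// 5s to acquire, 30s lock timeout; 'redis' is an assumed IRedisClient instance.
long state = dlock.Lock("locks:nightly-job", 5, 30, out lockExpire, redis);
if (state != DistributedLock.LOCK_NOT_ACQUIRED)   // LOCK_ACQUIRED or LOCK_RECOVERED
{
    try
    {
        // ...protected work...
    }
    finally
    {
        // Unlock only deletes the key while its stored value still equals our lockExpire.
        dlock.Unlock("locks:nightly-job", lockExpire, redis);
    }
}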
+ } + } + else + { localClient.UnWatch(); - } - } + } + } if (wasSet != LOCK_NOT_ACQUIRED) break; - System.Threading.Thread.Sleep(sleepIfLockSet); - totalTime += sleepIfLockSet; - } + TaskUtils.Sleep(sleepIfLockSet); + totalTime += sleepIfLockSet; + } if (wasSet != LOCK_NOT_ACQUIRED) { lockExpire = newLockExpire; } - return wasSet; - - } + return wasSet; + } - - /// - /// unlock key - /// - public virtual bool Unlock(string key, long lockExpire, IRedisClient client) - { - if (lockExpire <= 0) - return false; - long lockVal = 0; + /// + /// unlock key + /// + public virtual bool Unlock(string key, long lockExpire, IRedisClient client) + { + if (lockExpire <= 0) + return false; + long lockVal = 0; var localClient = (RedisClient)client; using (var pipe = localClient.CreatePipeline()) { - - pipe.QueueCommand(r => ((RedisNativeClient) r).Watch(key)); - pipe.QueueCommand(r => ((RedisNativeClient) r).Get(key), + + pipe.QueueCommand(r => ((RedisNativeClient)r).Watch(key)); + pipe.QueueCommand(r => ((RedisNativeClient)r).Get(key), x => lockVal = (x != null) ? BitConverter.ToInt64(x, 0) : 0); pipe.Flush(); } - if (lockVal != lockExpire) - { + if (lockVal != lockExpire) + { if (lockVal != 0) - Debug.WriteLine(String.Format("Unlock(): Failed to unlock key {0}; lock has been acquired by another client ", key)); + Debug.WriteLine($"Unlock(): Failed to unlock key {key}; lock has been acquired by another client "); else - Debug.WriteLine(String.Format("Unlock(): Failed to unlock key {0}; lock has been identifed as a zombie and harvested ", key)); + Debug.WriteLine($"Unlock(): Failed to unlock key {key}; lock has been identifed as a zombie and harvested "); localClient.UnWatch(); - return false; - } + return false; + } using (var trans = localClient.CreateTransaction()) { trans.QueueCommand(r => ((RedisNativeClient)r).Del(key)); - var rc = trans.Commit(); + var rc = trans.Commit(); if (!rc) - Debug.WriteLine(String.Format("Unlock(): Failed to delete key {0}; lock has been acquired by another client ", key)); + Debug.WriteLine($"Unlock(): Failed to delete key {key}; lock has been acquired by another client "); return rc; } - - } - + } /// /// @@ -140,6 +136,5 @@ private static long CalculateLockExpire(TimeSpan ts, int timeout) { return (long)(ts.TotalSeconds + timeout + 1.5); } - - } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/IDistributedLock.Async.cs b/src/ServiceStack.Redis/Support/Locking/IDistributedLock.Async.cs new file mode 100644 index 00000000..7b548b82 --- /dev/null +++ b/src/ServiceStack.Redis/Support/Locking/IDistributedLock.Async.cs @@ -0,0 +1,36 @@ +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Support.Locking +{ + /// + /// Distributed lock interface + /// + public interface IDistributedLockAsync + { + // note: can't use "out" with async, so return LockState instead + ValueTask LockAsync(string key, int acquisitionTimeout, int lockTimeout, IRedisClientAsync client, CancellationToken token = default); + ValueTask UnlockAsync(string key, long lockExpire, IRedisClientAsync client, CancellationToken token = default); + } + + public readonly struct LockState + { + public long Result { get; } // kinda feels like this should be an enum; leaving alone for API parity (sync vs async) + public long Expiration { get; } + public LockState(long result, long expiration) + { + Result = result; + Expiration = expiration; + } + public override bool Equals(object obj) => throw new NotSupportedException(); + public 
override int GetHashCode() => throw new NotSupportedException(); + public override string ToString() => nameof(LockState); + + public void Deconstruct(out long result, out long expiration) + { + result = Result; + expiration = Expiration; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/IDistributedLock.cs b/src/ServiceStack.Redis/Support/Locking/IDistributedLock.cs index 4d331bcc..2f1fc405 100644 --- a/src/ServiceStack.Redis/Support/Locking/IDistributedLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/IDistributedLock.cs @@ -4,8 +4,8 @@ /// Distributed lock interface /// public interface IDistributedLock - { - long Lock(string key, int acquisitionTimeout, int lockTimeout, out long lockExpire, IRedisClient client); - bool Unlock(string key, long lockExpire, IRedisClient client); - } + { + long Lock(string key, int acquisitionTimeout, int lockTimeout, out long lockExpire, IRedisClient client); + bool Unlock(string key, long lockExpire, IRedisClient client); + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/ILockingStrategy.cs b/src/ServiceStack.Redis/Support/Locking/ILockingStrategy.cs index 94c97f3f..6a60aab6 100644 --- a/src/ServiceStack.Redis/Support/Locking/ILockingStrategy.cs +++ b/src/ServiceStack.Redis/Support/Locking/ILockingStrategy.cs @@ -6,9 +6,9 @@ namespace ServiceStack.Redis.Support.Locking /// Locking strategy interface /// public interface ILockingStrategy - { - IDisposable ReadLock(); + { + IDisposable ReadLock(); - IDisposable WriteLock(); - } + IDisposable WriteLock(); + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/NoLockingStrategy.cs b/src/ServiceStack.Redis/Support/Locking/NoLockingStrategy.cs index 9f6c30b6..70a92057 100644 --- a/src/ServiceStack.Redis/Support/Locking/NoLockingStrategy.cs +++ b/src/ServiceStack.Redis/Support/Locking/NoLockingStrategy.cs @@ -2,16 +2,16 @@ namespace ServiceStack.Redis.Support.Locking { - public class NoLockingStrategy : ILockingStrategy - { - public IDisposable ReadLock() - { - return null; - } + public class NoLockingStrategy : ILockingStrategy + { + public IDisposable ReadLock() + { + return null; + } - public IDisposable WriteLock() - { - return null; - } - } + public IDisposable WriteLock() + { + return null; + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/ReadLock.cs b/src/ServiceStack.Redis/Support/Locking/ReadLock.cs index 847054b5..7cbb7db2 100644 --- a/src/ServiceStack.Redis/Support/Locking/ReadLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/ReadLock.cs @@ -3,30 +3,30 @@ namespace ServiceStack.Redis.Support.Locking { - /// - /// This class manages a read lock for a local readers/writer lock, - /// using the Resource Acquisition Is Initialization pattern - /// - public class ReadLock : IDisposable - { - private readonly ReaderWriterLockSlim lockObject; + /// + /// This class manages a read lock for a local readers/writer lock, + /// using the Resource Acquisition Is Initialization pattern + /// + public class ReadLock : IDisposable + { + private readonly ReaderWriterLockSlim lockObject; - /// - /// RAII initialization - /// - /// - public ReadLock(ReaderWriterLockSlim lockObject) - { - this.lockObject = lockObject; - lockObject.EnterReadLock(); - } + /// + /// RAII initialization + /// + /// + public ReadLock(ReaderWriterLockSlim lockObject) + { + this.lockObject = lockObject; + lockObject.EnterReadLock(); + } - /// - /// RAII disposal - /// - public void Dispose() 
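Illustrative sketch (not part of the patch): the async surface above returns a LockState (no 'out' parameters with async) that deconstructs into result and expiration; 'redisAsync' stands in for an IRedisClientAsync, and the key and timeouts are placeholders.

// 'redisAsync' is an assumed IRedisClientAsync; key and timeouts are placeholders.
IDistributedLockAsync dlock = new DistributedLock().AsAsync();
var (result, lockExpire) = await dlock.LockAsync("locks:nightly-job", 5, 30, redisAsync);
if (result != DistributedLock.LOCK_NOT_ACQUIRED)
{
    try
    {
        // ...protected work...
    }
    finally
    {
        await dlock.UnlockAsync("locks:nightly-job", lockExpire, redisAsync);
    }
}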
- { - lockObject.ExitReadLock(); - } - } + /// + /// RAII disposal + /// + public void Dispose() + { + lockObject.ExitReadLock(); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/ReaderWriterLockingStrategy.cs b/src/ServiceStack.Redis/Support/Locking/ReaderWriterLockingStrategy.cs index f3fba6cc..ba0e8763 100644 --- a/src/ServiceStack.Redis/Support/Locking/ReaderWriterLockingStrategy.cs +++ b/src/ServiceStack.Redis/Support/Locking/ReaderWriterLockingStrategy.cs @@ -3,19 +3,18 @@ namespace ServiceStack.Redis.Support.Locking { - public class ReaderWriterLockingStrategy : ILockingStrategy - { - private readonly ReaderWriterLockSlim lockObject = new ReaderWriterLockSlim(); + public class ReaderWriterLockingStrategy : ILockingStrategy + { + private readonly ReaderWriterLockSlim lockObject = new ReaderWriterLockSlim(); + public IDisposable ReadLock() + { + return new ReadLock(lockObject); + } - public IDisposable ReadLock() - { - return new ReadLock(lockObject); - } - - public IDisposable WriteLock() - { - return new WriteLock(lockObject); - } - } + public IDisposable WriteLock() + { + return new WriteLock(lockObject); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/WriteLock.cs b/src/ServiceStack.Redis/Support/Locking/WriteLock.cs index 7de69462..3250dd8f 100644 --- a/src/ServiceStack.Redis/Support/Locking/WriteLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/WriteLock.cs @@ -3,27 +3,27 @@ namespace ServiceStack.Redis.Support.Locking { - public class WriteLock : IDisposable - { - private readonly ReaderWriterLockSlim lockObject; + public class WriteLock : IDisposable + { + private readonly ReaderWriterLockSlim lockObject; - /// - /// This class manages a write lock for a local readers/writer lock, - /// using the Resource Acquisition Is Initialization pattern - /// - /// - public WriteLock(ReaderWriterLockSlim lockObject) - { - this.lockObject = lockObject; - lockObject.EnterWriteLock(); - } + /// + /// This class manages a write lock for a local readers/writer lock, + /// using the Resource Acquisition Is Initialization pattern + /// + /// + public WriteLock(ReaderWriterLockSlim lockObject) + { + this.lockObject = lockObject; + lockObject.EnterWriteLock(); + } - /// - /// RAII disposal - /// - public void Dispose() - { - lockObject.ExitWriteLock(); - } - } + /// + /// RAII disposal + /// + public void Dispose() + { + lockObject.ExitWriteLock(); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/ObjectSerializer.cs b/src/ServiceStack.Redis/Support/ObjectSerializer.cs index 11093cfb..5c756903 100644 --- a/src/ServiceStack.Redis/Support/ObjectSerializer.cs +++ b/src/ServiceStack.Redis/Support/ObjectSerializer.cs @@ -1,5 +1,7 @@ using System.IO; +#if !NETCORE using System.Runtime.Serialization.Formatters.Binary; +#endif namespace ServiceStack.Redis.Support { @@ -9,8 +11,9 @@ namespace ServiceStack.Redis.Support /// public class ObjectSerializer : ISerializer { +#if !NETCORE protected readonly BinaryFormatter bf = new BinaryFormatter(); - +#endif /// @@ -20,12 +23,16 @@ public class ObjectSerializer : ISerializer /// public virtual byte[] Serialize(object value) { +#if NETCORE + return null; +#else if (value == null) return null; var memoryStream = new MemoryStream(); memoryStream.Seek(0, 0); bf.Serialize(memoryStream, value); return memoryStream.ToArray(); +#endif } /// @@ -35,6 +42,9 @@ public virtual byte[] Serialize(object value) /// public virtual object Deserialize(byte[] 
someBytes) { +#if NETCORE + return null; +#else if (someBytes == null) return null; var memoryStream = new MemoryStream(); @@ -42,6 +52,7 @@ public virtual object Deserialize(byte[] someBytes) memoryStream.Seek(0, 0); var de = bf.Deserialize(memoryStream); return de; +#endif } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/OptimizedObjectSerializer.cs b/src/ServiceStack.Redis/Support/OptimizedObjectSerializer.cs index 8556743c..3d4dbf17 100644 --- a/src/ServiceStack.Redis/Support/OptimizedObjectSerializer.cs +++ b/src/ServiceStack.Redis/Support/OptimizedObjectSerializer.cs @@ -1,16 +1,18 @@ using System; using System.IO; using System.Text; +using ServiceStack.Text; +using ServiceStack; namespace ServiceStack.Redis.Support { - /// - /// Optimized implementation. Primitive types are manually serialized, the rest are serialized using binary serializer />. - /// - public class OptimizedObjectSerializer : ObjectSerializer - { - internal const ushort RawDataFlag = 0xfa52; - internal static readonly byte[] EmptyArray = new byte[0]; + /// + /// Optimized implementation. Primitive types are manually serialized, the rest are serialized using binary serializer />. + /// + public class OptimizedObjectSerializer : ObjectSerializer + { + internal const ushort RawDataFlag = 0xfa52; + internal static readonly byte[] EmptyArray = new byte[0]; /// /// @@ -33,109 +35,113 @@ public override object Deserialize(byte[] someBytes) var temp = (SerializedObjectWrapper)base.Deserialize(someBytes); return Unwrap(temp); } - + /// /// serialize value and wrap with /// /// /// - SerializedObjectWrapper SerializeToWrapper(object value) - { - // raw data is a special case when some1 passes in a buffer (byte[] or ArraySegment) - if (value is ArraySegment) - { - // ArraySegment is only passed in when a part of buffer is being - // serialized, usually from a MemoryStream (To avoid duplicating arrays - // the byte[] returned by MemoryStream.GetBuffer is placed into an ArraySegment.) - // - return new SerializedObjectWrapper(RawDataFlag, (ArraySegment)value); - } - - byte[] tmpByteArray = value as byte[]; - - // - or we just received a byte[]. No further processing is needed. - if (tmpByteArray != null) - { - return new SerializedObjectWrapper(RawDataFlag, new ArraySegment(tmpByteArray)); - } - - TypeCode code = value == null ? 
TypeCode.DBNull : Type.GetTypeCode(value.GetType()); - - byte[] data; - int length = -1; - - switch (code) - { - case TypeCode.DBNull: - data = EmptyArray; - length = 0; - break; - - case TypeCode.String: - data = Encoding.UTF8.GetBytes((string)value); - break; - - case TypeCode.Boolean: - data = BitConverter.GetBytes((bool)value); - break; - - case TypeCode.Int16: - data = BitConverter.GetBytes((short)value); - break; - - case TypeCode.Int32: - data = BitConverter.GetBytes((int)value); - break; - - case TypeCode.Int64: - data = BitConverter.GetBytes((long)value); - break; - - case TypeCode.UInt16: - data = BitConverter.GetBytes((ushort)value); - break; - - case TypeCode.UInt32: - data = BitConverter.GetBytes((uint)value); - break; - - case TypeCode.UInt64: - data = BitConverter.GetBytes((ulong)value); - break; - - case TypeCode.Char: - data = BitConverter.GetBytes((char)value); - break; - - case TypeCode.DateTime: - data = BitConverter.GetBytes(((DateTime)value).ToBinary()); - break; - - case TypeCode.Double: - data = BitConverter.GetBytes((double)value); - break; - - case TypeCode.Single: - data = BitConverter.GetBytes((float)value); - break; - - default: - using (var ms = new MemoryStream()) - { - bf.Serialize(ms, value); - - code = TypeCode.Object; - data = ms.GetBuffer(); - length = (int)ms.Length; - } - break; - } - - if (length < 0) - length = data.Length; - - return new SerializedObjectWrapper((ushort)((ushort)code | 0x0100), new ArraySegment(data, 0, length)); - } + SerializedObjectWrapper SerializeToWrapper(object value) + { + // raw data is a special case when some1 passes in a buffer (byte[] or ArraySegment) + if (value is ArraySegment) + { + // ArraySegment is only passed in when a part of buffer is being + // serialized, usually from a MemoryStream (To avoid duplicating arrays + // the byte[] returned by MemoryStream.GetBuffer is placed into an ArraySegment.) + // + return new SerializedObjectWrapper(RawDataFlag, (ArraySegment)value); + } + + byte[] tmpByteArray = value as byte[]; + + // - or we just received a byte[]. No further processing is needed. + if (tmpByteArray != null) + { + return new SerializedObjectWrapper(RawDataFlag, new ArraySegment(tmpByteArray)); + } + + TypeCode code = value == null ? 
TypeCode.DBNull : value.GetType().GetTypeCode(); + + byte[] data; + int length = -1; + + switch (code) + { + case TypeCode.DBNull: + data = EmptyArray; + length = 0; + break; + + case TypeCode.String: + data = Encoding.UTF8.GetBytes((string)value); + break; + + case TypeCode.Boolean: + data = BitConverter.GetBytes((bool)value); + break; + + case TypeCode.Int16: + data = BitConverter.GetBytes((short)value); + break; + + case TypeCode.Int32: + data = BitConverter.GetBytes((int)value); + break; + + case TypeCode.Int64: + data = BitConverter.GetBytes((long)value); + break; + + case TypeCode.UInt16: + data = BitConverter.GetBytes((ushort)value); + break; + + case TypeCode.UInt32: + data = BitConverter.GetBytes((uint)value); + break; + + case TypeCode.UInt64: + data = BitConverter.GetBytes((ulong)value); + break; + + case TypeCode.Char: + data = BitConverter.GetBytes((char)value); + break; + + case TypeCode.DateTime: + data = BitConverter.GetBytes(((DateTime)value).ToBinary()); + break; + + case TypeCode.Double: + data = BitConverter.GetBytes((double)value); + break; + + case TypeCode.Single: + data = BitConverter.GetBytes((float)value); + break; + + default: +#if NETCORE + data = new byte[0]; + length = 0; +#else + using (var ms = new MemoryStream()) + { + bf.Serialize(ms, value); + code = TypeCode.Object; + data = ms.GetBuffer(); + length = (int)ms.Length; + } +#endif + break; + } + + if (length < 0) + length = data.Length; + + return new SerializedObjectWrapper((ushort)((ushort)code | 0x0100), new ArraySegment(data, 0, length)); + } /// /// Unwrap object wrapped in @@ -143,94 +149,97 @@ SerializedObjectWrapper SerializeToWrapper(object value) /// /// object Unwrap(SerializedObjectWrapper item) - { - if (item.Data.Array == null) - return null; + { + if (item.Data.Array == null) + return null; - if (item.Flags == RawDataFlag) - { - ArraySegment tmp = item.Data; + if (item.Flags == RawDataFlag) + { + ArraySegment tmp = item.Data; - if (tmp.Count == tmp.Array.Length) - return tmp.Array; + if (tmp.Count == tmp.Array.Length) + return tmp.Array; - // we should never arrive here, but it's better to be safe than sorry - var retval = new byte[tmp.Count]; + // we should never arrive here, but it's better to be safe than sorry + var retval = new byte[tmp.Count]; - Array.Copy(tmp.Array, tmp.Offset, retval, 0, tmp.Count); + Array.Copy(tmp.Array, tmp.Offset, retval, 0, tmp.Count); - return retval; - } + return retval; + } - var code = (TypeCode)(item.Flags & 0x00ff); + var code = (TypeCode)(item.Flags & 0x00ff); - byte[] data = item.Data.Array; - int offset = item.Data.Offset; - int count = item.Data.Count; + byte[] data = item.Data.Array; + int offset = item.Data.Offset; + int count = item.Data.Count; - switch (code) - { - // incrementing a non-existing key then getting it - // returns as a string, but the flag will be 0 - // so treat all 0 flagged items as string - // this may help inter-client data management as well - // - // however we store 'null' as Empty + an empty array, - // so this must special-cased for compatibilty with - // earlier versions. we introduced DBNull as null marker in emc2.6 - case TypeCode.Empty: - return (data == null || count == 0) - ? 
null - : Encoding.UTF8.GetString(data, offset, count); + switch (code) + { + // incrementing a non-existing key then getting it + // returns as a string, but the flag will be 0 + // so treat all 0 flagged items as string + // this may help inter-client data management as well + // + // however we store 'null' as Empty + an empty array, + // so this must special-cased for compatibilty with + // earlier versions. we introduced DBNull as null marker in emc2.6 + case TypeCode.Empty: + return (data == null || count == 0) + ? null + : Encoding.UTF8.GetString(data, offset, count); - case TypeCode.DBNull: - return null; + case TypeCode.DBNull: + return null; - case TypeCode.String: - return Encoding.UTF8.GetString(data, offset, count); + case TypeCode.String: + return Encoding.UTF8.GetString(data, offset, count); - case TypeCode.Boolean: - return BitConverter.ToBoolean(data, offset); + case TypeCode.Boolean: + return BitConverter.ToBoolean(data, offset); - case TypeCode.Int16: - return BitConverter.ToInt16(data, offset); + case TypeCode.Int16: + return BitConverter.ToInt16(data, offset); - case TypeCode.Int32: - return BitConverter.ToInt32(data, offset); + case TypeCode.Int32: + return BitConverter.ToInt32(data, offset); - case TypeCode.Int64: - return BitConverter.ToInt64(data, offset); + case TypeCode.Int64: + return BitConverter.ToInt64(data, offset); - case TypeCode.UInt16: - return BitConverter.ToUInt16(data, offset); + case TypeCode.UInt16: + return BitConverter.ToUInt16(data, offset); - case TypeCode.UInt32: - return BitConverter.ToUInt32(data, offset); + case TypeCode.UInt32: + return BitConverter.ToUInt32(data, offset); - case TypeCode.UInt64: - return BitConverter.ToUInt64(data, offset); + case TypeCode.UInt64: + return BitConverter.ToUInt64(data, offset); - case TypeCode.Char: - return BitConverter.ToChar(data, offset); + case TypeCode.Char: + return BitConverter.ToChar(data, offset); - case TypeCode.DateTime: - return DateTime.FromBinary(BitConverter.ToInt64(data, offset)); + case TypeCode.DateTime: + return DateTime.FromBinary(BitConverter.ToInt64(data, offset)); - case TypeCode.Double: - return BitConverter.ToDouble(data, offset); + case TypeCode.Double: + return BitConverter.ToDouble(data, offset); - case TypeCode.Single: - return BitConverter.ToSingle(data, offset); + case TypeCode.Single: + return BitConverter.ToSingle(data, offset); - case TypeCode.Object: - using (var ms = new MemoryStream(data, offset, count)) - { - return bf.Deserialize(ms); - } + case TypeCode.Object: + using (var ms = new MemoryStream(data, offset, count)) + { +#if NETCORE + return null; +#else + return bf.Deserialize(ms); +#endif + } - default: throw new InvalidOperationException("Unknown TypeCode was returned: " + code); - } - } - } + default: throw new InvalidOperationException("Unknown TypeCode was returned: " + code); + } + } + } } - diff --git a/src/ServiceStack.Redis/Support/OrderedDictionary.cs b/src/ServiceStack.Redis/Support/OrderedDictionary.cs index 654f446b..636921d5 100644 --- a/src/ServiceStack.Redis/Support/OrderedDictionary.cs +++ b/src/ServiceStack.Redis/Support/OrderedDictionary.cs @@ -16,8 +16,7 @@ public class OrderedDictionary : IOrderedDictionary private static readonly string KeyTypeName = typeof(TKey).FullName; private static readonly string ValueTypeName = typeof(TValue).FullName; - private static readonly bool ValueTypeIsReferenceType = !typeof(ValueType).IsAssignableFrom(typeof(TValue)); - + private static readonly bool ValueTypeIsReferenceType = 
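Illustrative sketch (not part of the patch) of the wrapper flag layout used by SerializeToWrapper/Unwrap above, isolated from the serializer; the helper names are made up for the example.

using System;

// Low byte carries the System.TypeCode of the value and bit 8 marks a typed entry;
// RawDataFlag (0xfa52) is reserved for raw byte[]/ArraySegment payloads.
// EncodeFlags/DecodeFlags are illustrative names, not part of the patch.
static ushort EncodeFlags(TypeCode code) => (ushort)((ushort)code | 0x0100);
static TypeCode DecodeFlags(ushort flags) => (TypeCode)(flags & 0x00ff);

// e.g. EncodeFlags(TypeCode.Int32) == 0x0109 and DecodeFlags(0x0109) == TypeCode.Int32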
!typeof(ValueType).IsAssignableFrom(typeof(TValue)); private Dictionary dictionary; private List> list; private readonly IEqualityComparer comparer; diff --git a/src/ServiceStack.Redis/Support/Queue/Implementation/RedisWorkQueue.cs b/src/ServiceStack.Redis/Support/Queue/Implementation/RedisWorkQueue.cs index 94df5fb8..357ccfba 100644 --- a/src/ServiceStack.Redis/Support/Queue/Implementation/RedisWorkQueue.cs +++ b/src/ServiceStack.Redis/Support/Queue/Implementation/RedisWorkQueue.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using ServiceStack.Text; namespace ServiceStack.Redis.Support.Queue.Implementation @@ -6,36 +7,34 @@ namespace ServiceStack.Redis.Support.Queue.Implementation /// /// distributed work item queue /// - public class RedisWorkQueue + public class RedisWorkQueue { protected readonly RedisNamespace queueNamespace; protected string pendingWorkItemIdQueue; protected string workQueue; protected readonly PooledRedisClientManager clientManager; - public RedisWorkQueue(int maxReadPoolSize, int maxWritePoolSize, string host, int port) : - this(maxReadPoolSize, maxWritePoolSize, host, port, null) - { - - } + public RedisWorkQueue(int maxReadPoolSize, int maxWritePoolSize, string host, int port) + : this(maxReadPoolSize, maxWritePoolSize, host, port, null) { } - public RedisWorkQueue(int maxReadPoolSize, int maxWritePoolSize, string host, int port, string queueName ) + public RedisWorkQueue(int maxReadPoolSize, int maxWritePoolSize, string host, int port, string queueName) { - var qname = queueName ?? typeof (T) + "_Shared_Work_Queue"; + var qname = queueName ?? typeof(T) + "_Shared_Work_Queue"; queueNamespace = new RedisNamespace(qname); pendingWorkItemIdQueue = queueNamespace.GlobalCacheKey("PendingWorkItemIdQueue"); - workQueue = queueNamespace.GlobalCacheKey( "WorkQueue"); + workQueue = queueNamespace.GlobalCacheKey("WorkQueue"); var poolConfig = new RedisClientManagerConfig - { - MaxReadPoolSize = maxReadPoolSize, - MaxWritePoolSize = maxWritePoolSize - }; - - clientManager = new PooledRedisClientManager(new List() { host + ":" + port.ToString() },new List(), poolConfig) - { - RedisClientFactory = new SerializingRedisClientFactory() - }; + { + MaxReadPoolSize = maxReadPoolSize, + MaxWritePoolSize = maxWritePoolSize + }; + + clientManager = new PooledRedisClientManager(new[] { host + ":" + port }, + TypeConstants.EmptyStringArray, + poolConfig) { + RedisResolver = { ClientFactory = config => new SerializingRedisClient(config) } + }; } public void Dispose() diff --git a/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClient.cs b/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClient.cs index 2fda9e45..e53c28cf 100644 --- a/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClient.cs +++ b/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClient.cs @@ -1,17 +1,20 @@ using System.Collections; using System.Collections.Generic; -using ServiceStack.Redis.Support.Locking; namespace ServiceStack.Redis.Support.Queue.Implementation { public class SerializingRedisClient : RedisClient { private ISerializer serializer = new ObjectSerializer(); + + public SerializingRedisClient(string host) + : base(host) {} + + public SerializingRedisClient(RedisEndpoint config) + : base(config) {} public SerializingRedisClient(string host, int port) - : base(host, port) - { - } + : base(host, port) {} /// /// customize the client serializer @@ -58,10 +61,10 @@ public object Deserialize(byte[] someBytes) return 
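Illustrative sketch (not part of the patch): the object-initializer pattern RedisWorkQueue now uses to plug in a custom client type, shown on a standalone pool; the host string is a placeholder.

using ServiceStack.Redis;
using ServiceStack.Redis.Support.Queue.Implementation;

// "localhost:6379" is a placeholder. Every connection the pool opens goes through
// ClientFactory, so callers receive SerializingRedisClient instances rather than plain
// RedisClient, in place of the removed RedisClientFactory hook.
var clientManager = new PooledRedisClientManager(new[] { "localhost:6379" })
{
    RedisResolver = { ClientFactory = config => new SerializingRedisClient(config) }
};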
serializer.Deserialize(someBytes); } + /// /// deserialize an array of byte arrays /// /// - /// public IList Deserialize(byte[][] byteArray) { IList rc = new ArrayList(); diff --git a/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClientFactory.cs b/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClientFactory.cs deleted file mode 100644 index f6488d38..00000000 --- a/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClientFactory.cs +++ /dev/null @@ -1,15 +0,0 @@ -namespace ServiceStack.Redis.Support.Queue.Implementation -{ - /// - /// Factory to create SerializingRedisClient objects - /// - public class SerializingRedisClientFactory : IRedisClientFactory - { - public static SerializingRedisClientFactory Instance = new SerializingRedisClientFactory(); - - public RedisClient CreateRedisClient(string host, int port) - { - return new SerializingRedisClient(host, port); - } - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/SerializedObjectWrapper.cs b/src/ServiceStack.Redis/Support/SerializedObjectWrapper.cs index e66b7aec..0f06bed2 100644 --- a/src/ServiceStack.Redis/Support/SerializedObjectWrapper.cs +++ b/src/ServiceStack.Redis/Support/SerializedObjectWrapper.cs @@ -2,12 +2,12 @@ namespace ServiceStack.Redis.Support { - /// - /// wraps a serialized representation of an object - /// - /// - [Serializable] - public struct SerializedObjectWrapper + /// + /// wraps a serialized representation of an object + /// + /// + [Serializable] + public struct SerializedObjectWrapper { private ArraySegment data; private ushort flags; @@ -28,8 +28,8 @@ public SerializedObjectWrapper(ushort flags, ArraySegment data) /// public ArraySegment Data { - get { return data; } - set { data = value; } + get => data; + set => data = value; } /// @@ -37,8 +37,8 @@ public ArraySegment Data /// public ushort Flags { - get { return flags; } - set { flags = value; } + get => flags; + set => flags = value; } } } diff --git a/src/ServiceStack.Redis/Transaction/RedisTransaction.Async.cs b/src/ServiceStack.Redis/Transaction/RedisTransaction.Async.cs new file mode 100644 index 00000000..43ca79c8 --- /dev/null +++ b/src/ServiceStack.Redis/Transaction/RedisTransaction.Async.cs @@ -0,0 +1,129 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Redis.Pipeline; + +namespace ServiceStack.Redis +{ + /// + /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). 
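Illustrative sketch (not part of the patch) of driving the async transaction defined below; 'redisAsync' stands in for an IRedisClientAsync, and the value-level command names are assumed from the async client surface rather than shown in this hunk.

// 'redisAsync', the keys, and SetValueAsync/IncrementValueAsync are assumptions for this
// sketch; only CreateTransactionAsync/QueueCommand/CommitAsync appear in the patch itself.
var trans = await redisAsync.CreateTransactionAsync();
await using (trans.ConfigureAwait(false))
{
    trans.QueueCommand(r => r.SetValueAsync("greeting", "hello"));
    trans.QueueCommand(r => r.IncrementValueAsync("stats:visits"));

    // CommitAsync returns false when EXEC reports the MULTI block was aborted,
    // e.g. because a WATCHed key changed; the queued commands are then not applied.
    bool committed = await trans.CommitAsync();
}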
+ /// + public partial class RedisTransaction + : IRedisTransactionAsync, IRedisQueueCompletableOperationAsync + { + /// + /// Issue exec command (not queued) + /// + private async ValueTask ExecAsync(CancellationToken token) + { + RedisClient.Exec(); + await RedisClient.FlushSendBufferAsync(token).ConfigureAwait(false); + RedisClient.ResetSendBuffer(); + } + + + /// + /// Put "QUEUED" messages at back of queue + /// + partial void QueueExpectQueuedAsync() + { + QueuedCommands.Insert(0, new QueuedRedisOperation + { + }.WithAsyncReadCommand(RedisClient.ExpectQueuedAsync)); + } + + async ValueTask IRedisTransactionAsync.CommitAsync(CancellationToken token) + { + bool rc = true; + try + { + numCommands = QueuedCommands.Count / 2; + + //insert multi command at beginning + QueuedCommands.Insert(0, new QueuedRedisCommand + { + }.WithAsyncReturnCommand(VoidReturnCommandAsync: r => { Init(); return default; }) + .WithAsyncReadCommand(RedisClient.ExpectOkAsync)); + + //the first half of the responses will be "QUEUED", + // so insert reading of multiline after these responses + QueuedCommands.Insert(numCommands + 1, new QueuedRedisOperation + { + OnSuccessIntCallback = handleMultiDataResultCount + }.WithAsyncReadCommand(RedisClient.ReadMultiDataResultCountAsync)); + + // add Exec command at end (not queued) + QueuedCommands.Add(new RedisCommand + { + }.WithAsyncReturnCommand(r => ExecAsync(token))); + + //execute transaction + await ExecAsync(token).ConfigureAwait(false); + + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + } + catch (RedisTransactionFailedException) + { + rc = false; + } + finally + { + RedisClient.Transaction = null; + ClosePipeline(); + await RedisClient.AddTypeIdsRegisteredDuringPipelineAsync(token).ConfigureAwait(false); + } + return rc; + } + + ValueTask IRedisTransactionAsync.RollbackAsync(CancellationToken token) + { + Rollback(); // not currently anything different to do on the async path + return default; + } + // note: this also means that Dispose doesn't need to be complex; if Rollback needed + // splitting, we would need to override DisposeAsync and split the code, too + + + private protected override async ValueTask ReplayAsync(CancellationToken token) + { + bool rc = true; + try + { + await ExecuteAsync().ConfigureAwait(false); + + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + } + catch (RedisTransactionFailedException) + { + rc = false; + } + finally + { + RedisClient.Transaction = null; + ClosePipeline(); + await RedisClient.AddTypeIdsRegisteredDuringPipelineAsync(token).ConfigureAwait(false); + } + return rc; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Transaction/RedisTransaction.cs b/src/ServiceStack.Redis/Transaction/RedisTransaction.cs index e443d0f0..db2c36b2 100644 --- a/src/ServiceStack.Redis/Transaction/RedisTransaction.cs +++ b/src/ServiceStack.Redis/Transaction/RedisTransaction.cs @@ -1,41 +1,46 @@ // -// https://github.com/mythz/ServiceStack.Redis +// https://github.com/ServiceStack/ServiceStack.Redis // ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system // // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. 
// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. +// Licensed under the same terms of ServiceStack. // using System; -using System.Collections.Generic; using ServiceStack.Redis.Pipeline; namespace ServiceStack.Redis { - /// - /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). - /// - public class RedisTransaction - : RedisAllPurposePipeline, IRedisTransaction, IRedisQueueCompletableOperation - { - private int _numCommands = 0; - public RedisTransaction(RedisClient redisClient) : base(redisClient) - { - - } + /// + /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). + /// + public partial class RedisTransaction + : RedisAllPurposePipeline, IRedisTransaction, IRedisQueueCompletableOperation + { + private int numCommands = 0; + public RedisTransaction(RedisClient redisClient) + : this(redisClient, false) {} + + internal RedisTransaction(RedisClient redisClient, bool isAsync) + : base(redisClient) + { + // if someone casts between sync/async: the sync-over-async or + // async-over-sync is entirely self-inflicted; I can't fix stupid + _isAsync = isAsync; + } protected override void Init() { - //start pipelining - base.Init(); - //queue multi command - RedisClient.Multi(); - //set transaction - RedisClient.Transaction = this; + //start pipelining + base.Init(); + //queue multi command + RedisClient.Multi(); + //set transaction + RedisClient.Transaction = this; } /// @@ -44,63 +49,56 @@ protected override void Init() /// private void QueueExpectQueued() { - QueuedCommands.Insert(0, new QueuedRedisOperation() - { - VoidReadCommand = RedisClient.ExpectQueued - }); + QueuedCommands.Insert(0, new QueuedRedisOperation + { + VoidReadCommand = RedisClient.ExpectQueued + }); } - /// /// Issue exec command (not queued) /// private void Exec() { RedisClient.Exec(); - RedisClient.FlushSendBuffer(); - + RedisClient.FlushAndResetSendBuffer(); } - public bool Commit() + public bool Commit() { - bool rc = true; + bool rc = true; try { - _numCommands = QueuedCommands.Count / 2; + numCommands = QueuedCommands.Count / 2; //insert multi command at beginning - QueuedCommands.Insert(0, new QueuedRedisCommand() - { + QueuedCommands.Insert(0, new QueuedRedisCommand { VoidReturnCommand = r => Init(), VoidReadCommand = RedisClient.ExpectOk, }); - //the first half of the responses will be "QUEUED", // so insert reading of multiline after these responses - QueuedCommands.Insert(_numCommands + 1, new QueuedRedisOperation() - { + QueuedCommands.Insert(numCommands + 1, new QueuedRedisOperation { IntReadCommand = RedisClient.ReadMultiDataResultCount, OnSuccessIntCallback = handleMultiDataResultCount }); // add Exec command at end (not queued) - QueuedCommands.Add(new RedisCommand() - { + QueuedCommands.Add(new RedisCommand { VoidReturnCommand = r => Exec() }); //execute transaction Exec(); - - ///////////////////////////// + //receive expected results foreach (var queuedCommand in QueuedCommands) { queuedCommand.ProcessResult(); } } - catch (RedisTransactionFailedException e) + catch (RedisTransactionFailedException) { rc = false; } @@ -110,7 +108,7 @@ public bool Commit() ClosePipeline(); RedisClient.AddTypeIdsRegisteredDuringPipeline(); } - return rc; + return rc; } /// @@ -122,41 +120,40 @@ private void handleMultiDataResultCount(int count) // transaction failed due to WATCH condition if (count == -1) throw new RedisTransactionFailedException(); - if (count != _numCommands) + if (count != numCommands) throw new 
InvalidOperationException(string.Format( "Invalid results received from 'EXEC', expected '{0}' received '{1}'" + "\nWarning: Transaction was committed", - _numCommands, count)); + numCommands, count)); } - public void Rollback() - { - if (RedisClient.Transaction == null) - throw new InvalidOperationException("There is no current transaction to Rollback"); + public void Rollback() + { + if (RedisClient.Transaction == null) + throw new InvalidOperationException("There is no current transaction to Rollback"); - RedisClient.Transaction = null; - RedisClient.ClearTypeIdsRegisteredDuringPipeline(); - } + RedisClient.Transaction = null; + RedisClient.ClearTypeIdsRegisteredDuringPipeline(); + } - public bool Replay() + public override bool Replay() { bool rc = true; try { Execute(); - ///////////////////////////// //receive expected results foreach (var queuedCommand in QueuedCommands) { queuedCommand.ProcessResult(); } } - catch (RedisTransactionFailedException e) + catch (RedisTransactionFailedException) { - rc = false; + rc = false; } - finally + finally { RedisClient.Transaction = null; ClosePipeline(); @@ -165,20 +162,26 @@ public bool Replay() return rc; } - public void Dispose() - { + public override void Dispose() + { base.Dispose(); if (RedisClient.Transaction == null) return; - Rollback(); + Rollback(); } - #region Overrides of RedisQueueCompletableOperation methods - + private readonly bool _isAsync; protected override void AddCurrentQueuedOperation() { base.AddCurrentQueuedOperation(); - QueueExpectQueued(); + if (_isAsync) + { + QueueExpectQueuedAsync(); + } + else + { + QueueExpectQueued(); + } } - #endregion + partial void QueueExpectQueuedAsync(); } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/UtilExtensions.cs b/src/ServiceStack.Redis/UtilExtensions.cs index 33ddf1c1..aac1f9f9 100644 --- a/src/ServiceStack.Redis/UtilExtensions.cs +++ b/src/ServiceStack.Redis/UtilExtensions.cs @@ -1,16 +1,13 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; +using System.Collections.Generic; using ServiceStack.Text; namespace ServiceStack.Redis { internal static class UtilExtensions { - public static List ConvertEachTo(this IEnumerable list) + public static List ConvertEachTo(this List list) { - var to = new List(); + var to = new List(list.Count); foreach (var item in list) { to.Add(JsonSerializer.DeserializeFromString(item)); diff --git a/src/ServiceStack.Redis/ValueTask_Utils.Async.cs b/src/ServiceStack.Redis/ValueTask_Utils.Async.cs new file mode 100644 index 00000000..87f6c917 --- /dev/null +++ b/src/ServiceStack.Redis/ValueTask_Utils.Async.cs @@ -0,0 +1,166 @@ +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Internal +{ + internal static class ValueTask_Utils + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask Await(this ValueTask pending) + { + if (pending.IsCompletedSuccessfully) + { + _ = pending.Result; // for IValueTaskSource reasons + return default; + } + else + { + return Awaited(pending); + } + static async ValueTask Awaited(ValueTask pending) + => await pending.ConfigureAwait(false); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask Await(this ValueTask pending, Func projection) + { + return pending.IsCompletedSuccessfully ? 
projection(pending.Result).AsValueTaskResult() : Awaited(pending, projection); + static async ValueTask Awaited(ValueTask pending, Func projection) + => projection(await pending.ConfigureAwait(false)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask AsInt32(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? (checked((int)pending.Result)).AsValueTaskResult() : Awaited(pending); + static async ValueTask Awaited(ValueTask pending) + => checked((int)await pending.ConfigureAwait(false)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask Await(this ValueTask pending, Func projection, TState state) + { + return pending.IsCompletedSuccessfully ? projection(pending.Result, state).AsValueTaskResult() : Awaited(pending, projection, state); + static async ValueTask Awaited(ValueTask pending, Func projection, TState state) + => projection(await pending.ConfigureAwait(false), state); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask AwaitAsTrue(this ValueTask pending) + { + if (pending.IsCompletedSuccessfully) + { + pending.GetAwaiter().GetResult(); // for IValueTaskSource reasons + return s_ValueTaskTrue; + } + else + { + return Awaited(pending); + } + static async ValueTask Awaited(ValueTask pending) + { + await pending.ConfigureAwait(false); + return true; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static Task AwaitAsTrueTask(this ValueTask pending) + { + if (pending.IsCompletedSuccessfully) + { + pending.GetAwaiter().GetResult(); // for IValueTaskSource reasons + return s_TaskTrue; + } + else + { + return Awaited(pending); + } + static async Task Awaited(ValueTask pending) + { + await pending.ConfigureAwait(false); + return true; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask AwaitAsTrue(this ValueTask pending) + { + if (pending.IsCompletedSuccessfully) + { + _ = pending.Result; // for IValueTaskSource reasons + return s_ValueTaskTrue; + } + else + { + return Awaited(pending); + } + static async ValueTask Awaited(ValueTask pending) + { + await pending.ConfigureAwait(false); + return true; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask IsSuccessAsync(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? (pending.Result == RedisNativeClient.Success).AsValueTaskResult() : Awaited(pending); + static async ValueTask Awaited(ValueTask pending) + => (await pending.ConfigureAwait(false)) == RedisNativeClient.Success; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static Task IsSuccessTaskAsync(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? (pending.Result == RedisNativeClient.Success ? s_TaskTrue : s_TaskFalse) : Awaited(pending); + static async Task Awaited(ValueTask pending) + => (await pending.ConfigureAwait(false)) == RedisNativeClient.Success; + } + + static readonly Task s_TaskTrue = Task.FromResult(true), s_TaskFalse = Task.FromResult(false); + + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask> ConvertEachToAsync(this ValueTask> pending) + { + return pending.IsCompletedSuccessfully ? 
pending.Result.ConvertEachTo().AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask> pending) + => (await pending.ConfigureAwait(false)).ConvertEachTo(); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask> ToStringListAsync(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? pending.Result.ToStringList().AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask pending) + => (await pending.ConfigureAwait(false)).ToStringList(); + } + + private static readonly ValueTask s_ValueTaskTrue = true.AsValueTaskResult(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask Await(this ValueTask pending, T result) + { + return pending.IsCompletedSuccessfully ? result.AsValueTaskResult() : Awaited(pending, result); + static async ValueTask Awaited(ValueTask pending, T result) + { + await pending.ConfigureAwait(false); + return result; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask AsValueTaskResult(this T value) => new ValueTask(value); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask FromUtf8BytesAsync(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? pending.Result.FromUtf8Bytes().AsValueTaskResult() : Awaited(pending); + static async ValueTask Awaited(ValueTask pending) + => (await pending.ConfigureAwait(false)).FromUtf8Bytes(); + } + } +} diff --git a/src/StackExchangeTester/App.config b/src/StackExchangeTester/App.config new file mode 100644 index 00000000..731f6de6 --- /dev/null +++ b/src/StackExchangeTester/App.config @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/src/StackExchangeTester/Program.cs b/src/StackExchangeTester/Program.cs new file mode 100644 index 00000000..def9166c --- /dev/null +++ b/src/StackExchangeTester/Program.cs @@ -0,0 +1,22 @@ +using ServiceStack.Redis; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace StackExchangeTester +{ + class Program + { + static void Main(string[] args) + { + var x = new RedisManagerPool("MQHnkdl402DScXzhZIxHwDaA7s8nziy45okp84ykShA=@tls-11.redis.cache.windows.net:6380?ssl=true&sslprotocols=Tls11"); + var y = x.GetClient(); + y.Ping(); + y.Set("keyServiceStackSllChangesIStillHave512mb", "value"); + y.Dispose(); + x.Dispose(); + } + } +} diff --git a/src/StackExchangeTester/StackExchangeTester.csproj b/src/StackExchangeTester/StackExchangeTester.csproj new file mode 100644 index 00000000..83e8e98a --- /dev/null +++ b/src/StackExchangeTester/StackExchangeTester.csproj @@ -0,0 +1,85 @@ + + + + + Debug + AnyCPU + {0214D0F0-EA41-4593-B558-71F974CF7C62} + Exe + StackExchangeTester + StackExchangeTester + v4.6.1 + 512 + true + true + + + AnyCPU + true + full + false + bin\Debug\ + DEBUG;TRACE + prompt + 4 + + + AnyCPU + pdbonly + true + bin\Release\ + TRACE + prompt + 4 + + + + + + + + + + + + + + + + + + + + + {8368c965-b4f6-4263-9abb-731a175b2e77} + Console.Tests + + + {91c55091-a946-49b5-9517-8794ebcc5784} + ServiceStack.Redis.Tests.Sentinel + + + {951d28ee-5d22-4c62-ac0f-1661a8ceec5a} + ServiceStack.Redis.Tests + + + {af99f19b-4c04-4f58-81ef-b092f1fcc540} + ServiceStack.Redis + + + + + {EDE973DE-4C9A-11DE-A33F-06DC55D89593} + 1 + 0 + 0 + tlbimp + False + True + + + + + + + \ No newline at end of file diff --git a/src/TestMqHost/App.config b/src/TestMqHost/App.config new file mode 100644 index 
00000000..58262a1f --- /dev/null +++ b/src/TestMqHost/App.config @@ -0,0 +1,6 @@ + + + + + + diff --git a/src/TestMqHost/Program.cs b/src/TestMqHost/Program.cs new file mode 100644 index 00000000..623d0df1 --- /dev/null +++ b/src/TestMqHost/Program.cs @@ -0,0 +1,112 @@ +using System; +using System.Threading; +using ServiceStack; +using ServiceStack.Logging; +using ServiceStack.Redis; +using ServiceStack.Redis.Messaging; +using ServiceStack.Text; + +namespace TestMqHost +{ + public class Incr + { + public int Value { get; set; } + } + + class Program + { + static void Main2(string[] args) + { + var sbLogFactory = new StringBuilderLogFactory(); + LogManager.LogFactory = sbLogFactory; + var log = LogManager.GetLogger(typeof(Program)); + + var clientManager = new PooledRedisClientManager(new[] { "localhost" }) + { + PoolTimeout = 1000, + }; + + var mqHost = new RedisMqServer(clientManager, retryCount: 2); + + var msgsProcessed = 0; + var sum = 0; + mqHost.RegisterHandler(c => + { + var dto = c.GetBody(); + sum += dto.Value; + log.InfoFormat("Received {0}, sum: {1}", dto.Value, sum); + msgsProcessed++; + return null; + }); + + mqHost.Start(); + + 10.Times(i => + { + ThreadPool.QueueUserWorkItem(x => + { + using (var client = mqHost.CreateMessageQueueClient()) + { + try + { + log.InfoFormat("Publish: {0}...", i); + client.Publish(new Incr { Value = i }); + } + catch (Exception ex) + { + log.InfoFormat("Start Publish exception: {0}", ex.Message); + clientManager.GetClientPoolActiveStates().PrintDump(); + clientManager.GetReadOnlyClientPoolActiveStates().PrintDump(); + } + Thread.Sleep(10); + } + }); + }); + + ThreadPool.QueueUserWorkItem(_ => + { + using (var client = (RedisClient)clientManager.GetClient()) + { + client.SetConfig("timeout", "1"); + var clientAddrs = client.GetClientList().ConvertAll(x => x["addr"]); + log.InfoFormat("Killing clients: {0}...", clientAddrs.Dump()); + + try + { + clientAddrs.ForEach(client.ClientKill); + } + catch (Exception ex) + { + log.InfoFormat("Client exception: {0}", ex.Message); + } + } + }); + + 20.Times(i => + { + using (var client = mqHost.CreateMessageQueueClient()) + { + try + { + log.InfoFormat("Publish: {0}...", i); + client.Publish(new Incr { Value = i }); + } + catch (Exception ex) + { + log.InfoFormat("Publish exception: {0}", ex.Message); + clientManager.GetClientPoolActiveStates().PrintDump(); + clientManager.GetReadOnlyClientPoolActiveStates().PrintDump(); + } + } + + Thread.Sleep(1000); + }); + + Thread.Sleep(2000); + "Messages processed: {0}".Print(msgsProcessed); + "Logs: ".Print(); + sbLogFactory.GetLogs().Print(); + Console.ReadKey(); + } + } +} diff --git a/src/TestMqHost/Program2.cs b/src/TestMqHost/Program2.cs new file mode 100644 index 00000000..50b7b240 --- /dev/null +++ b/src/TestMqHost/Program2.cs @@ -0,0 +1,80 @@ +using System; +using System.Diagnostics; +using System.Threading; +using ServiceStack.Redis; +using ServiceStack.Redis.Messaging; +using ServiceStack.Text; + +namespace TestMqHost +{ + class Program2 + { + + static void Main(string[] args) + { + var clientManager = new PooledRedisClientManager(new[] { "localhost" }) + { + PoolTimeout = 1000, + }; + using (var client = clientManager.GetClient()) + { + client.FlushAll(); + } + + var mqHost = new RedisMqServer(clientManager); + + var msgsProcessed = 0; + var msgsQueued = 0; + var sum = 0; + mqHost.RegisterHandler(c => + { + var dto = c.GetBody(); + sum += dto.Value; + Console.WriteLine("Received {0}, sum: {1}", dto.Value, sum); + msgsProcessed++; + return null; + }); + + 
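// Descriptive note on this test program: the timer started below publishes one
// Incr message per second via the MQ server's message producer, the local
// redis-server process is killed mid-run, and publishing then resumes, so the
// queued/processed counters and the GetStats() dump at the end show how
// RedisMqServer behaves across the outage and subsequent reconnection.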
mqHost.Start(); + var processes = Process.GetProcessesByName("redis-server"); + var timer = new Timer(s => + { + using (var client = mqHost.MessageFactory.CreateMessageProducer()) + { + try + { + client.Publish(new Incr { Value = 1 }); + msgsQueued++; + Console.WriteLine("Message #{0} published.", msgsQueued); + } + catch { } + } + }, null, TimeSpan.Zero, TimeSpan.FromSeconds(1)); + + Thread.Sleep(5000); + timer.Change(Timeout.Infinite, Timeout.Infinite); + Thread.Sleep(1000); + + int msgsQueuedBeforeKill = msgsQueued; + int msgsProcessedBeforeKill = msgsProcessed; + processes[0].Kill(); + + timer.Change(TimeSpan.Zero, TimeSpan.FromSeconds(1)); + Thread.Sleep(15000); + timer.Dispose(); + + Thread.Sleep(1000); + + mqHost.GetStats().PrintDump(); + mqHost.GetStatus().Print(); + + "Messages queued before kill: {0}".Print(msgsQueuedBeforeKill); + "Messages processed before kill: {0}".Print(msgsProcessedBeforeKill); + + "Messages queued: {0}".Print(msgsQueued); + "Messages processed: {0}".Print(msgsProcessed); + + Console.ReadKey(); + } + } +} \ No newline at end of file diff --git a/src/TestMqHost/Properties/AssemblyInfo.cs b/src/TestMqHost/Properties/AssemblyInfo.cs new file mode 100644 index 00000000..48539a86 --- /dev/null +++ b/src/TestMqHost/Properties/AssemblyInfo.cs @@ -0,0 +1,36 @@ +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyTitle("TestMqHost")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("Microsoft")] +[assembly: AssemblyProduct("TestMqHost")] +[assembly: AssemblyCopyright("Copyright © Microsoft 2013")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. 
+[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[assembly: Guid("6df3bc57-6380-4f2f-8d02-731f04b2aec7")] + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Build and Revision Numbers +// by using the '*' as shown below: +// [assembly: AssemblyVersion("1.0.*")] +[assembly: AssemblyVersion("4.0.0.0")] +[assembly: AssemblyFileVersion("4.0.0.0")] diff --git a/src/TestMqHost/TestMqHost.csproj b/src/TestMqHost/TestMqHost.csproj new file mode 100644 index 00000000..14f23ae4 --- /dev/null +++ b/src/TestMqHost/TestMqHost.csproj @@ -0,0 +1,78 @@ + + + + + Debug + AnyCPU + {B932C136-4365-4C37-8187-96703715EBB4} + Exe + Properties + TestMqHost + TestMqHost + v4.0 + 512 + + + + AnyCPU + true + full + false + bin\Debug\ + DEBUG;TRACE + prompt + 4 + + + AnyCPU + pdbonly + true + bin\Release\ + TRACE + prompt + 4 + + + + False + ..\..\lib\net45\ServiceStack.Common.dll + + + False + ..\..\lib\net45\ServiceStack.Interfaces.dll + + + False + ..\..\lib\net45\ServiceStack.Text.dll + + + + + + + + + + + + + + + + + + + + {af99f19b-4c04-4f58-81ef-b092f1fcc540} + ServiceStack.Redis + + + + + \ No newline at end of file diff --git a/src/sentinel/orig/redis-6380/redis.windows.conf b/src/sentinel/orig/redis-6380/redis.windows.conf new file mode 100644 index 00000000..91324109 --- /dev/null +++ b/src/sentinel/orig/redis-6380/redis.windows.conf @@ -0,0 +1,890 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include .\path\to\local.conf +# include c:\path\to\other.conf + +################################ GENERAL ##################################### + +# On Windows, daemonize and pidfile are not supported. +# However, you can run redis as a Windows service, and specify a logfile. +# The logfile will contain the pid. + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +# port 6379 +port 6380 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. 
Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +# loglevel notice +loglevel verbose + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. +logfile "" + +# To enable logging to the Windows EventLog, just set 'syslog-enabled' to +# yes, and optionally update the other syslog parameters to suit your needs. +# If Redis is installed and launched as a Windows Service, this will +# automatically be enabled. +# syslog-enabled no + +# Specify the source name of the events in the Windows Application log. +# syslog-ident redis + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. 
+# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename "dump.rdb" + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380" + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. 
Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that trnasfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 
+# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. 
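For orientation alongside this replication config, the following is a minimal, illustrative ServiceStack.Redis sketch (for illustration only, not part of the committed files). It assumes the 6380 instance above is the master and that 6381/6382 are its read-only replicas, as listed in the sentinel.conf further below; writes go through GetClient() while reads may be served by GetReadOnlyClient():

using ServiceStack.Redis;

class ReplicaReadExample
{
    static void Main()
    {
        // Read/write hosts point at the master, read-only hosts at the replicas.
        var manager = new PooledRedisClientManager(
            new[] { "127.0.0.1:6380" },                     // master (writes)
            new[] { "127.0.0.1:6381", "127.0.0.1:6382" });  // replicas (reads)

        using (var master = manager.GetClient())
            master.SetValue("greeting", "hello");           // write goes to the 6380 master

        using (var replica = manager.GetReadOnlyClient())
        {
            // May return null until replication has caught up with the write above.
            System.Console.WriteLine(replica.GetValue("greeting"));
        }
    }
}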
+ +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# The Linux version of Redis relies on the system call fork() to perform +# point-in-time snapshots of the heap. In addition to the AOF and RDB backup +# mechanism, the master-slave synchronization and clustering features are +# dependent on this behavior of fork(). In order for the Windows version to +# perform like the Linux version we had to simulate this aspect of fork(). +# Doing so meant moving the Redis heap into a memory mapped file that can +# be shared with a child process. +# +# *** There must be disk space available for this file in order for Redis +# to launch. *** The default configuration places this file in the local +# appdata directory. If you wish to move this file to another local disk, +# use the heapdir flag as described below. +# +# The maxheap flag controls the maximum size of this memory mapped file, +# as well as the total usable space for the Redis heap. Running Redis +# without either maxheap or maxmemory will result in a memory mapped file +# being created that is equal to the size of physical memory. During +# fork() operations the total page file commit will max out at around: +# +# (size of physical memory) + (2 * size of maxheap) +# +# For instance, on a machine with 8GB of physical RAM, the max page file +# commit with the default maxheap size will be (8)+(2*8) GB , or 24GB. The +# default page file sizing of Windows will allow for this without having +# to reconfigure the system. Larger heap sizes are possible, but the maximum +# page file size will have to be increased accordingly. 
+# +# The Redis heap must be larger than the value specified by the maxmemory +# flag, as the heap allocator has its own memory requirements and +# fragmentation of the heap is inevitable. If only the maxmemory flag is +# specified, maxheap will be set at 1.5*maxmemory. If the maxheap flag is +# specified along with maxmemory, the maxheap flag will be automatically +# increased if it is smaller than 1.5*maxmemory. +# +# maxheap +maxheap 1gb + +# The heap memory mapped file must reside on a local path for heap sharing +# between processes to work. A UNC path will not suffice here. For maximum +# performance this should be located on the fastest local drive available. +# This value defaults to the local application data folder(e.g., +# "%USERPROFILE%\AppData\Local"). Since this file can be very large, you +# may wish to place this on a drive other than the one the operating system +# is installed on. +# +# Note that you must specify a directory here, not a file name. +# heapdir +heapdir C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380 + +# If Redis is to be used as an in-memory-only cache without any kind of +# persistence, then the fork() mechanism used by the background AOF/RDB +# persistence is unneccessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will disable the creation of +# the memory mapped heap file, redirect heap allocations to the system heap +# allocator, and disable commands that would otherwise cause fork() operations: +# BGSAVE and BGREWRITEAOF. This flag may not be combined with any of the other +# flags that configure AOF and RDB operations. +# persistence-available [(yes)|no] + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. 
You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. 
+# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. 
+aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enalbed at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# Event notification ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. 
+# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. 
The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeot, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are perforemd with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. 
+# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# include /path/to/local.conf +# include /path/to/other.conf diff --git a/src/sentinel/orig/redis-6380/sentinel.conf b/src/sentinel/orig/redis-6380/sentinel.conf new file mode 100644 index 00000000..6d743a88 --- /dev/null +++ b/src/sentinel/orig/redis-6380/sentinel.conf @@ -0,0 +1,180 @@ +# Redis1 sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26380 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. +# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380" + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6380 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. 
+# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. +# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. +# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel config-epoch mymaster 4 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel leader-epoch mymaster 4 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel known-slave mymaster 127.0.0.1 6381 +sentinel known-slave mymaster 127.0.0.1 6382 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). 
+# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. +# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. +# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. +# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/orig/redis-6381/redis.windows.conf b/src/sentinel/orig/redis-6381/redis.windows.conf new file mode 100644 index 00000000..13e49137 --- /dev/null +++ b/src/sentinel/orig/redis-6381/redis.windows.conf @@ -0,0 +1,892 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include .\path\to\local.conf +# include c:\path\to\other.conf + +################################ GENERAL ##################################### + +# On Windows, daemonize and pidfile are not supported. +# However, you can run redis as a Windows service, and specify a logfile. +# The logfile will contain the pid. + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +# port 6379 +port 6381 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. 
+tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +# loglevel notice +loglevel verbose + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. +logfile "" + +# To enable logging to the Windows EventLog, just set 'syslog-enabled' to +# yes, and optionally update the other syslog parameters to suit your needs. +# If Redis is installed and launched as a Windows Service, this will +# automatically be enabled. +# syslog-enabled no + +# Specify the source name of the events in the Windows Application log. +# syslog-ident redis + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename "dump.rdb" + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381" + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. 
+# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that trnasfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. 
But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. 
This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# The Linux version of Redis relies on the system call fork() to perform +# point-in-time snapshots of the heap. In addition to the AOF and RDB backup +# mechanism, the master-slave synchronization and clustering features are +# dependent on this behavior of fork(). In order for the Windows version to +# perform like the Linux version we had to simulate this aspect of fork(). +# Doing so meant moving the Redis heap into a memory mapped file that can +# be shared with a child process. +# +# *** There must be disk space available for this file in order for Redis +# to launch. *** The default configuration places this file in the local +# appdata directory. If you wish to move this file to another local disk, +# use the heapdir flag as described below. +# +# The maxheap flag controls the maximum size of this memory mapped file, +# as well as the total usable space for the Redis heap. Running Redis +# without either maxheap or maxmemory will result in a memory mapped file +# being created that is equal to the size of physical memory. During +# fork() operations the total page file commit will max out at around: +# +# (size of physical memory) + (2 * size of maxheap) +# +# For instance, on a machine with 8GB of physical RAM, the max page file +# commit with the default maxheap size will be (8)+(2*8) GB , or 24GB. The +# default page file sizing of Windows will allow for this without having +# to reconfigure the system. Larger heap sizes are possible, but the maximum +# page file size will have to be increased accordingly. +# +# The Redis heap must be larger than the value specified by the maxmemory +# flag, as the heap allocator has its own memory requirements and +# fragmentation of the heap is inevitable. If only the maxmemory flag is +# specified, maxheap will be set at 1.5*maxmemory. If the maxheap flag is +# specified along with maxmemory, the maxheap flag will be automatically +# increased if it is smaller than 1.5*maxmemory. +# +# maxheap +maxheap 1gb + +# The heap memory mapped file must reside on a local path for heap sharing +# between processes to work. A UNC path will not suffice here. 
For maximum +# performance this should be located on the fastest local drive available. +# This value defaults to the local application data folder(e.g., +# "%USERPROFILE%\AppData\Local"). Since this file can be very large, you +# may wish to place this on a drive other than the one the operating system +# is installed on. +# +# Note that you must specify a directory here, not a file name. +# heapdir +heapdir C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381 + +# If Redis is to be used as an in-memory-only cache without any kind of +# persistence, then the fork() mechanism used by the background AOF/RDB +# persistence is unneccessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will disable the creation of +# the memory mapped heap file, redirect heap allocations to the system heap +# allocator, and disable commands that would otherwise cause fork() operations: +# BGSAVE and BGREWRITEAOF. This flag may not be combined with any of the other +# flags that configure AOF and RDB operations. +# persistence-available [(yes)|no] + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. 
+# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. 
+# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. 
The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enalbed at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# Event notification ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 
+# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. 
+# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeot, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are perforemd with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +################################## INCLUDES ################################### + +# Include one or more other config files here. 
This is useful if you +# have a standard template that goes to all Redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# include /path/to/local.conf +# include /path/to/other.conf +slaveof 127.0.0.1 6380 + diff --git a/src/sentinel/orig/redis-6381/sentinel.conf b/src/sentinel/orig/redis-6381/sentinel.conf new file mode 100644 index 00000000..a0b77a3b --- /dev/null +++ b/src/sentinel/orig/redis-6381/sentinel.conf @@ -0,0 +1,180 @@ +# Redis1 sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26381 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. +# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381" + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6380 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. +# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. 
+# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel config-epoch mymaster 4 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel leader-epoch mymaster 4 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel known-slave mymaster 127.0.0.1 6381 +sentinel known-slave mymaster 127.0.0.1 6382 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. +# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. 
+# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. +# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/orig/redis-6382/redis.windows.conf b/src/sentinel/orig/redis-6382/redis.windows.conf new file mode 100644 index 00000000..1764dfe3 --- /dev/null +++ b/src/sentinel/orig/redis-6382/redis.windows.conf @@ -0,0 +1,891 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include .\path\to\local.conf +# include c:\path\to\other.conf + +################################ GENERAL ##################################### + +# On Windows, daemonize and pidfile are not supported. +# However, you can run redis as a Windows service, and specify a logfile. +# The logfile will contain the pid. + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +# port 6379 +port 6382 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. 
There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +# loglevel notice +loglevel verbose + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. +logfile "" + +# To enable logging to the Windows EventLog, just set 'syslog-enabled' to +# yes, and optionally update the other syslog parameters to suit your needs. +# If Redis is installed and launched as a Windows Service, this will +# automatically be enabled. +# syslog-enabled no + +# Specify the source name of the events in the Windows Application log. +# syslog-ident redis + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. 
+# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename "dump.rdb" + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382" + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. 
To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that trnasfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. 
+# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. 
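If `requirepass` were enabled as described above, clients would have to AUTH before any other command. A hedged sketch (the password and the `password@host:port` connection-string form are illustrative assumptions, not values from this commit):

```csharp
using System;
using ServiceStack.Redis;

class AuthExample
{
    static void Main()
    {
        // Assumption: prefixing the host with "password@" makes the client send AUTH
        // first, matching a server started with "requirepass my-strong-password".
        using (var redisManager = new RedisManagerPool("my-strong-password@127.0.0.1:6380"))
        using (var redis = redisManager.GetClient())
        {
            redis.SetValue("secured:key", "authenticated write");
            Console.WriteLine(redis.GetValue("secured:key"));
        }
    }
}
```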
For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# The Linux version of Redis relies on the system call fork() to perform +# point-in-time snapshots of the heap. In addition to the AOF and RDB backup +# mechanism, the master-slave synchronization and clustering features are +# dependent on this behavior of fork(). In order for the Windows version to +# perform like the Linux version we had to simulate this aspect of fork(). +# Doing so meant moving the Redis heap into a memory mapped file that can +# be shared with a child process. +# +# *** There must be disk space available for this file in order for Redis +# to launch. *** The default configuration places this file in the local +# appdata directory. If you wish to move this file to another local disk, +# use the heapdir flag as described below. +# +# The maxheap flag controls the maximum size of this memory mapped file, +# as well as the total usable space for the Redis heap. Running Redis +# without either maxheap or maxmemory will result in a memory mapped file +# being created that is equal to the size of physical memory. During +# fork() operations the total page file commit will max out at around: +# +# (size of physical memory) + (2 * size of maxheap) +# +# For instance, on a machine with 8GB of physical RAM, the max page file +# commit with the default maxheap size will be (8)+(2*8) GB , or 24GB. The +# default page file sizing of Windows will allow for this without having +# to reconfigure the system. Larger heap sizes are possible, but the maximum +# page file size will have to be increased accordingly. +# +# The Redis heap must be larger than the value specified by the maxmemory +# flag, as the heap allocator has its own memory requirements and +# fragmentation of the heap is inevitable. If only the maxmemory flag is +# specified, maxheap will be set at 1.5*maxmemory. If the maxheap flag is +# specified along with maxmemory, the maxheap flag will be automatically +# increased if it is smaller than 1.5*maxmemory. +# +# maxheap +maxheap 1gb + +# The heap memory mapped file must reside on a local path for heap sharing +# between processes to work. A UNC path will not suffice here. For maximum +# performance this should be located on the fastest local drive available. +# This value defaults to the local application data folder(e.g., +# "%USERPROFILE%\AppData\Local"). 
Since this file can be very large, you +# may wish to place this on a drive other than the one the operating system +# is installed on. +# +# Note that you must specify a directory here, not a file name. +# heapdir +heapdir C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382 + +# If Redis is to be used as an in-memory-only cache without any kind of +# persistence, then the fork() mechanism used by the background AOF/RDB +# persistence is unneccessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will disable the creation of +# the memory mapped heap file, redirect heap allocations to the system heap +# allocator, and disable commands that would otherwise cause fork() operations: +# BGSAVE and BGREWRITEAOF. This flag may not be combined with any of the other +# flags that configure AOF and RDB operations. +# persistence-available [(yes)|no] + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. 
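maxmemory and maxmemory-policy can also be adjusted at runtime with CONFIG SET rather than edited here. A hedged sketch, assuming the client's GetConfig/SetConfig helpers wrap CONFIG GET/SET (the 100mb cap and allkeys-lru policy are illustrative values, not part of this commit):

```csharp
using System;
using ServiceStack.Redis;

class MaxMemoryExample
{
    static void Main()
    {
        using (var redisManager = new RedisManagerPool("127.0.0.1:6382"))
        using (var redis = redisManager.GetClient())
        {
            // Assumption: SetConfig/GetConfig wrap CONFIG SET / CONFIG GET.
            redis.SetConfig("maxmemory", "100mb");              // hard cap on data memory
            redis.SetConfig("maxmemory-policy", "allkeys-lru"); // evict any key by LRU when full

            Console.WriteLine(redis.GetConfig("maxmemory-policy"));
        }
    }
}
```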
For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. 
+no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). 
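Scripts governed by the lua-time-limit above are sent from the client with EVAL. A hedged sketch using ServiceStack.Redis's ExecLuaAsString (assumed available in the client version this commit targets); a script that runs past lua-time-limit makes the server answer other clients with BUSY until it finishes or SCRIPT KILL is issued:

```csharp
using System;
using ServiceStack.Redis;

class LuaExample
{
    static void Main()
    {
        using (var redisManager = new RedisManagerPool("127.0.0.1:6382"))
        using (var redis = redisManager.GetClient())
        {
            // A trivial EVAL; anything slower than lua-time-limit (5000 ms above)
            // starts triggering BUSY replies for other connections.
            var reply = redis.ExecLuaAsString("return redis.call('ping')");
            Console.WriteLine(reply); // "PONG"
        }
    }
}
```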
+# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enalbed at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# Event notification ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. 
Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). 
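The notify-keyspace-events feature described above is delivered over the same Pub/Sub channels these client-output-buffer limits protect. A hedged sketch of consuming expired-key events; it assumes notify-keyspace-events has been changed to "Ex" (e.g. via CONFIG SET), since the directive above ships disabled:

```csharp
using System;
using ServiceStack.Redis;

class ExpiredKeyListener
{
    static void Main()
    {
        using (var redisManager = new RedisManagerPool("127.0.0.1:6382"))
        using (var redis = redisManager.GetClient())
        using (var subscription = redis.CreateSubscription())
        {
            subscription.OnMessage = (channel, key) =>
                Console.WriteLine("Expired: " + key);

            // Blocks this thread; requires notify-keyspace-events "Ex" on the server,
            // because the directive above leaves notifications turned off ("").
            subscription.SubscribeToChannels("__keyevent@0__:expired");
        }
    }
}
```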
+# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeot, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are perforemd with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# include /path/to/local.conf +# include /path/to/other.conf +slaveof 127.0.0.1 6380 diff --git a/src/sentinel/orig/redis-6382/sentinel.conf b/src/sentinel/orig/redis-6382/sentinel.conf new file mode 100644 index 00000000..7e0a5003 --- /dev/null +++ b/src/sentinel/orig/redis-6382/sentinel.conf @@ -0,0 +1,180 @@ +# Redis1 sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26382 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. 
+# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382" + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6380 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. +# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. +# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel config-epoch mymaster 4 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel leader-epoch mymaster 4 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. 
It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel known-slave mymaster 127.0.0.1 6381 +sentinel known-slave mymaster 127.0.0.1 6382 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. +# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. +# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. 
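On the client side, the `sentinel monitor mymaster` setup in this file is what ServiceStack.Redis's RedisSentinel support resolves masters through. A hedged sketch: the 26380 and 26381 sentinel ports are assumptions inferred from the 26382 port above, and only the documented RedisSentinel API shape is used:

```csharp
using ServiceStack.Redis;

class SentinelClientExample
{
    static void Main()
    {
        // 26382 is the sentinel port configured above; 26380/26381 are assumed to
        // follow the same pattern for the other two sentinels in this commit.
        var sentinelHosts = new[] { "127.0.0.1:26380", "127.0.0.1:26381", "127.0.0.1:26382" };

        var sentinel = new RedisSentinel(sentinelHosts, masterName: "mymaster");

        // Start() asks the sentinels for the current master/slaves and returns a
        // client manager that follows failovers automatically.
        using (IRedisClientsManager redisManager = sentinel.Start())
        using (var redis = redisManager.GetClient())
        {
            redis.SetValue("sentinel:test", "resolved via mymaster");
        }
    }
}
```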
+# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis-6380/redis.conf b/src/sentinel/redis-6380/redis.conf new file mode 100644 index 00000000..faf4eaf4 --- /dev/null +++ b/src/sentinel/redis-6380/redis.conf @@ -0,0 +1,5 @@ +# Relative to ./src/sentinel +include redis.conf + +port 6380 +dir ./redis-6380/state diff --git a/src/sentinel/redis-6380/redis.windows.conf b/src/sentinel/redis-6380/redis.windows.conf new file mode 100644 index 00000000..c187d09b --- /dev/null +++ b/src/sentinel/redis-6380/redis.windows.conf @@ -0,0 +1,964 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include .\path\to\local.conf +# include c:\path\to\other.conf + +################################ GENERAL ##################################### + +# On Windows, daemonize and pidfile are not supported. +# However, you can run redis as a Windows service, and specify a logfile. +# The logfile will contain the pid. + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +# port 6379 +port 6380 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. 
+# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +# loglevel notice +loglevel verbose + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. +logfile "" + +# To enable logging to the Windows EventLog, just set 'syslog-enabled' to +# yes, and optionally update the other syslog parameters to suit your needs. +# If Redis is installed and launched as a Windows Service, this will +# automatically be enabled. +# syslog-enabled no + +# Specify the source name of the events in the Windows Application log. +# syslog-ident redis + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. 
+rdbchecksum yes + +# The filename where to dump the DB +dbfilename "dump.rdb" + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380" + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. 
+# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that trnasfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. 
+# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. 
By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# If Redis is to be used as an in-memory-only cache without any kind of +# persistence, then the fork() mechanism used by the background AOF/RDB +# persistence is unnecessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will redirect heap +# allocations to the system heap allocator, and disable commands that would +# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF. +# This flag may not be combined with any of the other flags that configure +# AOF and RDB operations. +# persistence-available [(yes)|no] + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# WARNING: not setting maxmemory will cause Redis to terminate with an +# out-of-memory exception if the heap limit is reached. +# +# NOTE: since Redis uses the system paging file to allocate the heap memory, +# the Working Set memory usage showed by the Windows Task Manager or by other +# tools such as ProcessExplorer will not always be accurate. For example, right +# after a background save of the RDB or the AOF files, the working set value +# may drop significantly. In order to check the correct amount of memory used +# by the redis-server to store the data, use the INFO client command. The INFO +# command shows only the memory used to store the redis data, not the extra +# memory used by the Windows process for its own requirements. Th3 extra amount +# of memory not reported by the INFO command can be calculated subtracting the +# Peak Working Set reported by the Windows Task Manager and the used_memory_peak +# reported by the INFO command. +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. 
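The note above recommends checking memory through INFO rather than the Windows Task Manager working set. A hedged sketch reading used_memory via the client's Info dictionary (assumed to expose the parsed INFO reply as key/value pairs):

```csharp
using System;
using ServiceStack.Redis;

class MemoryInfoExample
{
    static void Main()
    {
        using (var redisManager = new RedisManagerPool("127.0.0.1:6380"))
        using (var redis = redisManager.GetClient())
        {
            // Assumption: Info is the parsed INFO reply keyed by field name.
            var info = redis.Info;
            Console.WriteLine("used_memory:      " + info["used_memory"]);
            Console.WriteLine("used_memory_peak: " + info["used_memory_peak"]);
        }
    }
}
```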
You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. 
+# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. 
+aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. 
Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). 
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# Event notification ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# A Alias for g$lshzxe, so that the "AKE" string means all the events.
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead.
Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). 
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# slave -> slave clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+# +# include /path/to/local.conf +# include /path/to/other.conf diff --git a/src/sentinel/redis-6380/sentinel.conf b/src/sentinel/redis-6380/sentinel.conf new file mode 100644 index 00000000..561fdb58 --- /dev/null +++ b/src/sentinel/redis-6380/sentinel.conf @@ -0,0 +1,7 @@ +# Relative to ./src/sentinel + +port 26380 +dir ./redis-6380/state +sentinel monitor mymaster 127.0.0.1 6380 2 +protected-mode no + diff --git a/src/sentinel/redis-6380/sentinel.windows.conf b/src/sentinel/redis-6380/sentinel.windows.conf new file mode 100644 index 00000000..cd89e805 --- /dev/null +++ b/src/sentinel/redis-6380/sentinel.windows.conf @@ -0,0 +1,181 @@ +# Redis1 sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26380 +bind 127.0.0.1 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. +# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380" + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6380 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. +# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. 
+# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel config-epoch mymaster 4 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel leader-epoch mymaster 4 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel known-slave mymaster 127.0.0.1 6381 +sentinel known-slave mymaster 127.0.0.1 6382 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. +# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. 
+# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. +# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis-6381/redis.conf b/src/sentinel/redis-6381/redis.conf new file mode 100644 index 00000000..0d21c604 --- /dev/null +++ b/src/sentinel/redis-6381/redis.conf @@ -0,0 +1,6 @@ +# Relative to ./src/sentinel +include redis.conf + +port 6381 +dir ./redis-6381/state +slaveof 127.0.0.1 6380 diff --git a/src/sentinel/redis-6381/redis.windows.conf b/src/sentinel/redis-6381/redis.windows.conf new file mode 100644 index 00000000..70b73a7e --- /dev/null +++ b/src/sentinel/redis-6381/redis.windows.conf @@ -0,0 +1,966 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include .\path\to\local.conf +# include c:\path\to\other.conf + +################################ GENERAL ##################################### + +# On Windows, daemonize and pidfile are not supported. +# However, you can run redis as a Windows service, and specify a logfile. +# The logfile will contain the pid. + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +# port 6379 +port 6381 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. 
It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +# loglevel notice +loglevel verbose + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. +logfile "" + +# To enable logging to the Windows EventLog, just set 'syslog-enabled' to +# yes, and optionally update the other syslog parameters to suit your needs. +# If Redis is installed and launched as a Windows Service, this will +# automatically be enabled. +# syslog-enabled no + +# Specify the source name of the events in the Windows Application log. +# syslog-ident redis + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename "dump.rdb" + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381" + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. 
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# -------------------------------------------------------
+# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
+# -------------------------------------------------------
+#
+# New slaves and reconnecting slaves that are not able to continue the replication
+# process just receiving differences, need to do what is called a "full
+# synchronization". An RDB file is transmitted from the master to the slaves.
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the slaves incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to slave sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more slaves
+# can be queued and served with the RDB file as soon as the current child producing
+# the RDB file finishes its work. With diskless replication instead once
+# the transfer starts, new slaves arriving will be queued and a new transfer
+# will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple slaves
+# will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the slaves.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new slaves arriving, that will be queued for the next RDB transfer, so the server
+# waits a delay in order to let more slaves arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# Slaves send PINGs to server in a predefined interval. It's possible to change
+# this interval with the repl_ping_slave_period option. The default value is 10
+# seconds.
+#
+# repl-ping-slave-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
+# 2) Master timeout from the point of view of slaves (data, pings).
+# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-slave-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the slave.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the slave socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to slaves.
But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. 
This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# If Redis is to be used as an in-memory-only cache without any kind of +# persistence, then the fork() mechanism used by the background AOF/RDB +# persistence is unnecessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will redirect heap +# allocations to the system heap allocator, and disable commands that would +# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF. +# This flag may not be combined with any of the other flags that configure +# AOF and RDB operations. +# persistence-available [(yes)|no] + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# WARNING: not setting maxmemory will cause Redis to terminate with an +# out-of-memory exception if the heap limit is reached. 
+#
+# NOTE: since Redis uses the system paging file to allocate the heap memory,
+# the Working Set memory usage showed by the Windows Task Manager or by other
+# tools such as ProcessExplorer will not always be accurate. For example, right
+# after a background save of the RDB or the AOF files, the working set value
+# may drop significantly. In order to check the correct amount of memory used
+# by the redis-server to store the data, use the INFO client command. The INFO
+# command shows only the memory used to store the redis data, not the extra
+# memory used by the Windows process for its own requirements. The extra amount
+# of memory not reported by the INFO command can be calculated subtracting the
+# Peak Working Set reported by the Windows Task Manager and the used_memory_peak
+# reported by the INFO command.
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among five behaviors:
+#
+# volatile-lru -> remove the key with an expire set using an LRU algorithm
+# allkeys-lru -> remove any key according to the LRU algorithm
+# volatile-random -> remove a random key with an expire set
+# allkeys-random -> remove a random key, any key
+# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+# noeviction -> don't expire at all, just return an error on write operations
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are no suitable keys for eviction.
+#
+# At the date of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy volatile-lru
+
+# LRU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can select as well the sample
+# size to check. For instance for default Redis will check three keys and
+# pick the one that was used less recently, you can change the sample size
+# using the following configuration directive.
+#
+# maxmemory-samples 3
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+ +appendonly no + +# The name of the append only file (default: "appendonly.aof") +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. 
+# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. 
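As an aside to the LUA SCRIPTING section above: once a script has run past lua-time-limit, Redis starts answering other clients with errors until the script finishes or is killed, so scripts should stay short. A hedged C# sketch of invoking a small script through this library follows; the ExecLuaAsString overload taking separate keys/args arrays is assumed, as is the local 6380 instance.

using System;
using ServiceStack.Redis;

class ShortLuaScript
{
    static void Main()
    {
        using (var redis = new RedisClient("127.0.0.1", 6380))
        {
            // A fast script: anything running past lua-time-limit (5000 ms above) makes
            // Redis reply to other queries with errors until SCRIPT KILL or completion.
            var result = redis.ExecLuaAsString(
                "return redis.call('SET', KEYS[1], ARGV[1])",
                new[] { "lua:example" },    // KEYS
                new[] { "set-from-lua" });  // ARGV
            Console.WriteLine(result);      // typically "OK"
        }
    }
}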
+# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. 
+#
+# However sometimes you want the subset of the cluster which is working,
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# In order to setup your cluster make sure to read the documentation
+# available at http://redis.io web site.
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# Event notification ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. 
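Relating to the Event notification section above: once notify-keyspace-events includes E plus at least one event class, expired-key events can be consumed over Pub/Sub. A minimal C# sketch follows, assuming the setting has been changed from the empty default to "Ex" and that the instance is the local 6380 master; both are assumptions, not part of this change.

using System;
using ServiceStack.Redis;

class ExpiredKeyListener
{
    static void Main()
    {
        // Assumes the server is running with: notify-keyspace-events "Ex"
        using (var redis = new RedisClient("127.0.0.1", 6380))
        using (var subscription = redis.CreateSubscription())
        {
            subscription.OnMessage = (channel, expiredKey) =>
                Console.WriteLine("Expired: " + expiredKey);

            // Blocks and fires OnMessage for every key that expires in DB 0.
            subscription.SubscribeToChannels("__keyevent@0__:expired");
        }
    }
}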
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# slave -> slave clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here.
This is useful if you +# have a standard template that goes to all Redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# include /path/to/local.conf +# include /path/to/other.conf +slaveof 127.0.0.1 6380 + diff --git a/src/sentinel/redis-6381/sentinel.conf b/src/sentinel/redis-6381/sentinel.conf new file mode 100644 index 00000000..1b730fbf --- /dev/null +++ b/src/sentinel/redis-6381/sentinel.conf @@ -0,0 +1,7 @@ +# Relative to ./src/sentinel + +port 26381 +dir ./redis-6381/state +sentinel monitor mymaster 127.0.0.1 6380 2 +protected-mode no + diff --git a/src/sentinel/redis-6381/sentinel.windows.conf b/src/sentinel/redis-6381/sentinel.windows.conf new file mode 100644 index 00000000..c85c7d09 --- /dev/null +++ b/src/sentinel/redis-6381/sentinel.windows.conf @@ -0,0 +1,181 @@ +# Redis1 sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26381 +bind 127.0.0.1 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. +# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381" + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6380 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. 
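The sentinel.conf above monitors "mymaster" (127.0.0.1:6380) from a sentinel listening on 26381. A hedged C# sketch of letting the client discover the current master through that sentinel follows; the RedisSentinel class and its Start() method are assumed from this library, and only the 26381 sentinel visible in this change is listed (a real deployment would list all of them).

using ServiceStack.Redis;

class SentinelClientSetup
{
    static void Main()
    {
        // The sentinel(s) from this configuration; more would normally be listed.
        var sentinelHosts = new[] { "127.0.0.1:26381" };
        var sentinel = new RedisSentinel(sentinelHosts, "mymaster");

        // Resolves the current master for "mymaster" and follows failovers.
        IRedisClientsManager redisManager = sentinel.Start();
        using (var redis = redisManager.GetClient())
        {
            redis.SetValue("sentinel:test", "written via the resolved master");
        }
    }
}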
+# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. +# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel config-epoch mymaster 4 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel leader-epoch mymaster 4 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel known-slave mymaster 127.0.0.1 6381 +sentinel known-slave mymaster 127.0.0.1 6382 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. 
+# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. +# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. +# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis-6382/redis.conf b/src/sentinel/redis-6382/redis.conf new file mode 100644 index 00000000..2ddf17cc --- /dev/null +++ b/src/sentinel/redis-6382/redis.conf @@ -0,0 +1,6 @@ +# Relative to ./src/sentinel +include redis.conf + +port 6382 +dir ./redis-6382/state +slaveof 127.0.0.1 6380 diff --git a/src/sentinel/redis-6382/redis.windows.conf b/src/sentinel/redis-6382/redis.windows.conf new file mode 100644 index 00000000..3c857473 --- /dev/null +++ b/src/sentinel/redis-6382/redis.windows.conf @@ -0,0 +1,965 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include .\path\to\local.conf +# include c:\path\to\other.conf + +################################ GENERAL ##################################### + +# On Windows, daemonize and pidfile are not supported. +# However, you can run redis as a Windows service, and specify a logfile. +# The logfile will contain the pid. + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +# port 6379 +port 6382 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. 
+tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +# loglevel notice +loglevel verbose + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. +logfile "" + +# To enable logging to the Windows EventLog, just set 'syslog-enabled' to +# yes, and optionally update the other syslog parameters to suit your needs. +# If Redis is installed and launched as a Windows Service, this will +# automatically be enabled. +# syslog-enabled no + +# Specify the source name of the events in the Windows Application log. +# syslog-ident redis + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename "dump.rdb" + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382" + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. 
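Given the replication layout in this change (6380 as master, 6381 and 6382 as its slaves), reads and writes can be split across the topology on the client side. A rough C# sketch follows; the PooledRedisClientManager overload taking read-write and read-only host lists is assumed, with the ports taken from the configs in this change.

using System;
using ServiceStack.Redis;

class ReplicationAwareClients
{
    static void Main()
    {
        // First list: where writes go (the master); second list: read-only slaves.
        var manager = new PooledRedisClientManager(
            new[] { "127.0.0.1:6380" },
            new[] { "127.0.0.1:6381", "127.0.0.1:6382" });

        using (var master = manager.GetClient())
            master.SetValue("replicated:key", "written on the master");

        using (var slave = manager.GetReadOnlyClient())
            Console.WriteLine(slave.GetValue("replicated:key"));
    }
}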
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# -------------------------------------------------------
+# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
+# -------------------------------------------------------
+#
+# New slaves and reconnecting slaves that are not able to continue the replication
+# process just receiving differences, need to do what is called a "full
+# synchronization". An RDB file is transmitted from the master to the slaves.
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the slaves incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to slave sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more slaves
+# can be queued and served with the RDB file as soon as the current child producing
+# the RDB file finishes its work. With diskless replication instead once
+# the transfer starts, new slaves arriving will be queued and a new transfer
+# will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple slaves
+# will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the slaves.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new slaves arriving, that will be queued for the next RDB transfer, so the server
+# waits a delay in order to let more slaves arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# Slaves send PINGs to server in a predefined interval. It's possible to change
+# this interval with the repl_ping_slave_period option. The default value is 10
+# seconds.
+#
+# repl-ping-slave-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
+# 2) Master timeout from the point of view of slaves (data, pings).
+# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-slave-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the slave.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the slave socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to slaves.
But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. 
This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# If Redis is to be used as an in-memory-only cache without any kind of +# persistence, then the fork() mechanism used by the background AOF/RDB +# persistence is unnecessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will redirect heap +# allocations to the system heap allocator, and disable commands that would +# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF. +# This flag may not be combined with any of the other flags that configure +# AOF and RDB operations. +# persistence-available [(yes)|no] + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# WARNING: not setting maxmemory will cause Redis to terminate with an +# out-of-memory exception if the heap limit is reached. 
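If requirepass were uncommented in the SECURITY section above, clients would have to AUTH before issuing other commands. A hedged C# sketch follows; the Password property on RedisClient is assumed, and "foobared" is just the placeholder password from the comment, not a value set by this change.

using ServiceStack.Redis;

class AuthenticatedClient
{
    static void Main()
    {
        // Matches a hypothetical "requirepass foobared" on the 6382 instance above.
        using (var redis = new RedisClient("127.0.0.1", 6382) { Password = "foobared" })
        {
            redis.SetValue("secure:key", "authenticated write");
        }
    }
}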
+#
+# NOTE: since Redis uses the system paging file to allocate the heap memory,
+# the Working Set memory usage showed by the Windows Task Manager or by other
+# tools such as ProcessExplorer will not always be accurate. For example, right
+# after a background save of the RDB or the AOF files, the working set value
+# may drop significantly. In order to check the correct amount of memory used
+# by the redis-server to store the data, use the INFO client command. The INFO
+# command shows only the memory used to store the redis data, not the extra
+# memory used by the Windows process for its own requirements. The extra amount
+# of memory not reported by the INFO command can be calculated subtracting the
+# Peak Working Set reported by the Windows Task Manager and the used_memory_peak
+# reported by the INFO command.
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among five behaviors:
+#
+# volatile-lru -> remove the key with an expire set using an LRU algorithm
+# allkeys-lru -> remove any key according to the LRU algorithm
+# volatile-random -> remove a random key with an expire set
+# allkeys-random -> remove a random key, any key
+# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+# noeviction -> don't expire at all, just return an error on write operations
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are no suitable keys for eviction.
+#
+# At the date of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy volatile-lru
+
+# LRU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can select as well the sample
+# size to check. For instance for default Redis will check three keys and
+# pick the one that was used less recently, you can change the sample size
+# using the following configuration directive.
+#
+# maxmemory-samples 3
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+ +appendonly no + +# The name of the append only file (default: "appendonly.aof") +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. 
+# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. 
+# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. 
+#
+# However sometimes you want the subset of the cluster which is working,
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# In order to setup your cluster make sure to read the documentation
+# available at http://redis.io web site.
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# Event notification ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. 
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# slave -> slave clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. 
This is useful if you +# have a standard template that goes to all Redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# include /path/to/local.conf +# include /path/to/other.conf +slaveof 127.0.0.1 6380 diff --git a/src/sentinel/redis-6382/sentinel.conf b/src/sentinel/redis-6382/sentinel.conf new file mode 100644 index 00000000..d52c0824 --- /dev/null +++ b/src/sentinel/redis-6382/sentinel.conf @@ -0,0 +1,7 @@ +# Relative to ./src/sentinel + +port 26382 +dir ./redis-6382/state +sentinel monitor mymaster 127.0.0.1 6380 2 +protected-mode no + diff --git a/src/sentinel/redis-6382/sentinel.windows.conf b/src/sentinel/redis-6382/sentinel.windows.conf new file mode 100644 index 00000000..1aca1b25 --- /dev/null +++ b/src/sentinel/redis-6382/sentinel.windows.conf @@ -0,0 +1,181 @@ +# Redis1 sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26382 +bind 127.0.0.1 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. +# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382" + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6380 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. 
+# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. +# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel config-epoch mymaster 4 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel leader-epoch mymaster 4 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel known-slave mymaster 127.0.0.1 6381 +sentinel known-slave mymaster 127.0.0.1 6382 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. 
+# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. +# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. +# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis.conf b/src/sentinel/redis.conf new file mode 100644 index 00000000..c446925f --- /dev/null +++ b/src/sentinel/redis.conf @@ -0,0 +1,940 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################ GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile /var/run/redis.pid + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. 
+# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. 
+stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. 
To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. 
+# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. 
For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. 
+# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs a bit more CPU. 3 is very fast but not very accurate. +# +# maxmemory-samples 5 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. 
Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. 
The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. 
+# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. 
+# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. 
+set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. 
+# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +protected-mode no diff --git a/src/sentinel/redis/redis-cli.exe b/src/sentinel/redis/redis-cli.exe new file mode 100644 index 00000000..03e8b172 Binary files /dev/null and b/src/sentinel/redis/redis-cli.exe differ diff --git a/src/sentinel/redis/redis-server.exe b/src/sentinel/redis/redis-server.exe new file mode 100644 index 00000000..e8cd2d3c Binary files /dev/null and b/src/sentinel/redis/redis-server.exe differ diff --git a/src/sentinel/reset.cmd b/src/sentinel/reset.cmd new file mode 100644 index 00000000..f7dc64ee --- /dev/null +++ b/src/sentinel/reset.cmd @@ -0,0 +1,14 @@ +copy /v/y orig\redis-6380\* redis-6380 +copy /v/y orig\redis-6381\* redis-6381 +copy /v/y orig\redis-6382\* redis-6382 + +del /F /Q redis-6380\dump.rdb +del /F /Q redis-6381\dump.rdb +del /F /Q redis-6382\dump.rdb + +redis\redis-cli -p 6380 SHUTDOWN NOSAVE +redis\redis-cli -p 6381 SHUTDOWN NOSAVE +redis\redis-cli -p 6382 SHUTDOWN NOSAVE +redis\redis-cli -p 26380 SHUTDOWN NOSAVE +redis\redis-cli -p 26381 SHUTDOWN NOSAVE +redis\redis-cli -p 26382 SHUTDOWN NOSAVE diff --git a/src/sentinel/sentinel.conf b/src/sentinel/sentinel.conf new file mode 100644 index 00000000..776e6091 --- /dev/null +++ b/src/sentinel/sentinel.conf @@ -0,0 +1,182 @@ +# Example sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26379 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. +# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. 
+#
+# Example:
+#
+# sentinel announce-ip 1.2.3.4
+
+# dir <working-directory>
+# Every long running process should have a well-defined working directory.
+# For Redis Sentinel to chdir to /tmp at startup is the simplest thing
+# for the process to don't interfere with administrative tasks such as
+# unmounting filesystems.
+dir /tmp
+
+# sentinel monitor <master-name> <ip> <redis-port> <quorum>
+#
+# Tells Sentinel to monitor this master, and to consider it in O_DOWN
+# (Objectively Down) state only if at least <quorum> sentinels agree.
+#
+# Note that whatever is the ODOWN quorum, a Sentinel will require to
+# be elected by the majority of the known Sentinels in order to
+# start a failover, so no failover can be performed in minority.
+#
+# Slaves are auto-discovered, so you don't need to specify slaves in
+# any way. Sentinel itself will rewrite this configuration file adding
+# the slaves using additional configuration options.
+# Also note that the configuration file is rewritten when a
+# slave is promoted to master.
+#
+# Note: master name should not include special characters or spaces.
+# The valid charset is A-z 0-9 and the three characters ".-_".
+sentinel monitor mymaster 127.0.0.1 6379 2
+
+# sentinel auth-pass <master-name> <password>
+#
+# Set the password to use to authenticate with the master and slaves.
+# Useful if there is a password set in the Redis instances to monitor.
+#
+# Note that the master password is also used for slaves, so it is not
+# possible to set a different password in masters and slaves instances
+# if you want to be able to monitor these instances with Sentinel.
+#
+# However you can have Redis instances without the authentication enabled
+# mixed with Redis instances requiring the authentication (as long as the
+# password set is the same for all the instances requiring the password) as
+# the AUTH command will have no effect in Redis instances with authentication
+# switched off.
+#
+# Example:
+#
+# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd
+
+# sentinel down-after-milliseconds <master-name> <milliseconds>
+#
+# Number of milliseconds the master (or any attached slave or sentinel) should
+# be unreachable (as in, not acceptable reply to PING, continuously, for the
+# specified period) in order to consider it in S_DOWN state (Subjectively
+# Down).
+#
+# Default is 30 seconds.
+sentinel down-after-milliseconds mymaster 30000
+
+# sentinel parallel-syncs <master-name> <numslaves>
+#
+# How many slaves we can reconfigure to point to the new slave simultaneously
+# during the failover. Use a low number if you use the slaves to serve query
+# to avoid that all the slaves will be unreachable at about the same
+# time while performing the synchronization with the master.
+sentinel parallel-syncs mymaster 1
+
+# sentinel failover-timeout <master-name> <milliseconds>
+#
+# Specifies the failover timeout in milliseconds. It is used in many ways:
+#
+# - The time needed to re-start a failover after a previous failover was
+# already tried against the same master by a given Sentinel, is two
+# times the failover timeout.
+#
+# - The time needed for a slave replicating to a wrong master according
+# to a Sentinel current configuration, to be forced to replicate
+# with the right master, is exactly the failover timeout (counting since
+# the moment a Sentinel detected the misconfiguration).
+#
+# - The time needed to cancel a failover that is already in progress but
+# did not produced any configuration change (SLAVEOF NO ONE yet not
+# acknowledged by the promoted slave).
+#
+# - The maximum time a failover in progress waits for all the slaves to be
+# reconfigured as slaves of the new master.
+# However even after this time the slaves will be reconfigured by the
+# Sentinels anyway, but not with the exact parallel-syncs progression as
+# specified.
+#
+# Default is 3 minutes.
+sentinel failover-timeout mymaster 180000
+
+# SCRIPTS EXECUTION
+#
+# sentinel notification-script and sentinel reconfig-script are used in order
+# to configure scripts that are called to notify the system administrator
+# or to reconfigure clients after a failover. The scripts are executed
+# with the following rules for error handling:
+#
+# If script exits with "1" the execution is retried later (up to a maximum
+# number of times currently set to 10).
+#
+# If script exits with "2" (or an higher value) the script execution is
+# not retried.
+#
+# If script terminates because it receives a signal the behavior is the same
+# as exit code 1.
+#
+# A script has a maximum running time of 60 seconds. After this limit is
+# reached the script is terminated with a SIGKILL and the execution retried.
+
+# NOTIFICATION SCRIPT
+#
+# sentinel notification-script <master-name> <script-path>
+#
+# Call the specified notification script for any sentinel event that is
+# generated in the WARNING level (for instance -sdown, -odown, and so forth).
+# This script should notify the system administrator via email, SMS, or any
+# other messaging system, that there is something wrong with the monitored
+# Redis systems.
+#
+# The script is called with just two arguments: the first is the event type
+# and the second the event description.
+#
+# The script must exist and be executable in order for sentinel to start if
+# this option is provided.
+#
+# Example:
+#
+# sentinel notification-script mymaster /var/redis/notify.sh
+
+# CLIENTS RECONFIGURATION SCRIPT
+#
+# sentinel client-reconfig-script <master-name> <script-path>
+#
+# When the master changed because of a failover a script can be called in
+# order to perform application-specific tasks to notify the clients that the
+# configuration has changed and the master is at a different address.
+#
+# The following arguments are passed to the script:
+#
+# <master-name> <role> <state> <from-ip> <from-port> <to-ip> <to-port>
+#
+# <state> is currently always "failover"
+# <role> is either "leader" or "observer"
+#
+# The arguments from-ip, from-port, to-ip, to-port are used to communicate
+# the old address of the master and the new address of the elected slave
+# (now a master).
+#
+# This script should be resistant to multiple invocations.
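+#
+# For instance, when mymaster fails over from 127.0.0.1:6380 to 127.0.0.1:6381
+# (illustrative addresses only), the configured script would be invoked
+# roughly as:
+#
+# reconfig.sh mymaster leader failover 127.0.0.1 6380 127.0.0.1 6381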
+# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh + +protected-mode no + diff --git a/src/sentinel/startAll.cmd b/src/sentinel/startAll.cmd new file mode 100644 index 00000000..3a4db540 --- /dev/null +++ b/src/sentinel/startAll.cmd @@ -0,0 +1,11 @@ +start cmd.exe /e:on /k "redis\redis-server redis-6380\redis.windows.conf" +start cmd.exe /e:on /k "redis\redis-server redis-6381\redis.windows.conf" +start cmd.exe /e:on /k "redis\redis-server redis-6382\redis.windows.conf" + +start cmd.exe /e:on /k "redis\redis-server redis-6380\sentinel.windows.conf --sentinel" +start cmd.exe /e:on /k "redis\redis-server redis-6381\sentinel.windows.conf --sentinel" +start cmd.exe /e:on /k "redis\redis-server redis-6382\sentinel.windows.conf --sentinel" + +pause + +redis\redis-cli -p 26380 sentinel masters diff --git a/src/sentinel/startAll.sh b/src/sentinel/startAll.sh new file mode 100755 index 00000000..7019d631 --- /dev/null +++ b/src/sentinel/startAll.sh @@ -0,0 +1,17 @@ +redis_server=redis-server +redis_sentinel=redis-sentinel +redis_cli=redis-cli + +$redis_server redis-6380/redis.conf & +$redis_sentinel redis-6380/sentinel.conf & + +$redis_server redis-6381/redis.conf & +$redis_sentinel redis-6381/sentinel.conf & + +$redis_server redis-6382/redis.conf & +$redis_sentinel redis-6382/sentinel.conf & + +read -n1 -r -p "Press any key to see sentinel info on masters and slaves..." + +$redis_cli -p 26380 sentinel master mymaster +$redis_cli -p 26381 sentinel slaves mymaster diff --git a/src/sentinel/stopAll.sh b/src/sentinel/stopAll.sh new file mode 100755 index 00000000..c6a712de --- /dev/null +++ b/src/sentinel/stopAll.sh @@ -0,0 +1,9 @@ +redis_cli=redis-cli + +$redis_cli -p 26382 SHUTDOWN NOSAVE +$redis_cli -p 26381 SHUTDOWN NOSAVE +$redis_cli -p 26380 SHUTDOWN NOSAVE + +$redis_cli -p 6382 SHUTDOWN NOSAVE +$redis_cli -p 6381 SHUTDOWN NOSAVE +$redis_cli -p 6380 SHUTDOWN NOSAVE diff --git a/src/servicestack.snk b/src/servicestack.snk new file mode 100644 index 00000000..dade7cea Binary files /dev/null and b/src/servicestack.snk differ diff --git a/tests/Console.Tests/BlockingPop.cs b/tests/Console.Tests/BlockingPop.cs new file mode 100644 index 00000000..90aef4c5 --- /dev/null +++ b/tests/Console.Tests/BlockingPop.cs @@ -0,0 +1,77 @@ +using System; +using ServiceStack; +using ServiceStack.Logging; +using ServiceStack.Redis; + +namespace ConsoleTests +{ + public class BlockingPop + { + public void Execute() + { + LogManager.LogFactory = new ConsoleLogFactory(); + var log = LogManager.LogFactory.GetLogger("redistest"); + + // ******************** + // set REDIS CONFIGS + // ******************** + RedisConfig.DefaultConnectTimeout = 1 * 1000; + RedisConfig.DefaultSendTimeout = 1 * 1000; + RedisConfig.DefaultReceiveTimeout = 1 * 1000; + //RedisConfig.DefaultRetryTimeout = 15 * 1000; + RedisConfig.DefaultIdleTimeOutSecs = 240; + RedisConfig.BackOffMultiplier = 10; + RedisConfig.BufferPoolMaxSize = 500000; + RedisConfig.VerifyMasterConnections = true; + RedisConfig.HostLookupTimeoutMs = 1000; + RedisConfig.DeactivatedClientsExpiry = TimeSpan.FromSeconds(15); + RedisConfig.EnableVerboseLogging = true; + + var redisManager = new RedisManagerPool("localhost?connectTimeout=1000"); + + // how many test items to create + var items = 5; + // how long to try popping + var waitForSeconds = 30; + // name of list + var listId = "testlist"; + + var startedAt = DateTime.Now; + + log.Info("--------------------------"); + log.Info("push {0} items to a list, then try pop for {1} seconds. 
repeat.".Fmt(items, waitForSeconds)); + log.Info("--------------------------"); + + using (var redis = redisManager.GetClient()) + { + do + { + // add items to list + for (int i = 1; i <= items; i++) + { + redis.PushItemToList(listId, $"item {i}"); + } + + do + { + var item = redis.BlockingPopItemFromList(listId, null); + + // log the popped item. if BRPOP timeout is null and list empty, I do not expect to print anything + log.InfoFormat("{0}", string.IsNullOrEmpty(item) ? " list empty " : item); + + System.Threading.Thread.Sleep(1000); + + } while (DateTime.Now - startedAt < TimeSpan.FromSeconds(waitForSeconds)); + + log.Info("--------------------------"); + log.Info("completed first loop"); + log.Info("--------------------------"); + + } while (DateTime.Now - startedAt < TimeSpan.FromSeconds(2 * waitForSeconds)); + + log.Info("--------------------------"); + log.Info("completed outer loop"); + } + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/BlockingRemoveAfterReconnection.cs b/tests/Console.Tests/BlockingRemoveAfterReconnection.cs new file mode 100644 index 00000000..b085d9ed --- /dev/null +++ b/tests/Console.Tests/BlockingRemoveAfterReconnection.cs @@ -0,0 +1,35 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Logging; +using ServiceStack.Redis; +using ServiceStack.Text; + +namespace ConsoleTests +{ + public class BlockingRemoveAfterReconnection + { + protected internal static RedisManagerPool BasicRedisClientManager; + + public void Execute() + { + //RedisConfig.AssumeServerVersion = 4000; + RedisConfig.DefaultConnectTimeout = 20 * 1000; + RedisConfig.DefaultRetryTimeout = 20 * 1000; + BasicRedisClientManager = new RedisManagerPool(); + try + { + using (var client = BasicRedisClientManager.GetClient()) + { + Console.WriteLine("Blocking..."); + var fromList = client.BlockingRemoveStartFromList("AnyQueue", TimeSpan.FromMinutes(20)); + Console.WriteLine($"Received: {fromList.Dump()}"); + } + } + catch (Exception e) + { + Console.WriteLine(e); + } + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/BrPopAfterReconnection.cs b/tests/Console.Tests/BrPopAfterReconnection.cs new file mode 100644 index 00000000..f5867f06 --- /dev/null +++ b/tests/Console.Tests/BrPopAfterReconnection.cs @@ -0,0 +1,66 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Logging; +using ServiceStack.Redis; +using ServiceStack.Text; + +namespace ConsoleTests +{ + public class BrPopAfterReconnection + { + protected internal static BasicRedisClientManager BasicRedisClientManager; + + public void Execute() + { +// RedisConfig.AssumeServerVersion = 4000; +// RedisConfig.DisableVerboseLogging = false; +// LogManager.LogFactory = new ConsoleLogFactory(); + + var host = "localhost"; + var port = "6379"; + var db = "9"; + + var redisUri = $"{host}:{port}?db={db}"; + + BasicRedisClientManager = new BasicRedisClientManager(redisUri); + var queue = "FormSaved"; + + while (true) + { + Task.Run(() => BlockingReceive(queue)); + Thread.Sleep(1000); + + Console.WriteLine("Restart Redis and press Enter..."); + Console.ReadLine(); + + Console.WriteLine("Enter something:"); + var item = Console.ReadLine(); + + if (!string.IsNullOrWhiteSpace(item)) + { + using (var client = BasicRedisClientManager.GetClient()) + { + client.AddItemToList(queue, item); + } + + Console.WriteLine("Item added"); + } + + Thread.Sleep(1000); + } + } + + public static void BlockingReceive(string queue) + { + using (var client = 
BasicRedisClientManager.GetReadOnlyClient()) + { + Console.WriteLine($"Listening to {queue}"); + + var fromList = client.BlockingPopItemFromList(queue, TimeSpan.FromSeconds(60)); + + Console.WriteLine($"Received:{fromList.Dump()}"); + } + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/Console.Tests.csproj b/tests/Console.Tests/Console.Tests.csproj new file mode 100644 index 00000000..5b260ab0 --- /dev/null +++ b/tests/Console.Tests/Console.Tests.csproj @@ -0,0 +1,17 @@ + + + + Exe + net6.0 + Console.Tests + + + + + + + + + + + diff --git a/tests/Console.Tests/DbSelectConnectionStringIssue.cs b/tests/Console.Tests/DbSelectConnectionStringIssue.cs new file mode 100644 index 00000000..49d453f5 --- /dev/null +++ b/tests/Console.Tests/DbSelectConnectionStringIssue.cs @@ -0,0 +1,39 @@ +using System; +using System.Threading; +using ServiceStack; +using ServiceStack.Logging; +using ServiceStack.Redis; + +namespace ConsoleTests; + +class DbSelectConnectionStringIssue +{ + public void Execute() + { + LogManager.LogFactory = new ConsoleLogFactory(); + + Licensing.RegisterLicense(""); + + var redisManagerPool = new RedisManagerPool("redis://redisHost?db=7"); + + for (int i = 0; i < 5; i++) + { + try + { + using (IRedisClient client = redisManagerPool.GetClient()) + { + string value = client.GetValue("status"); + + Console.WriteLine($"Successfully retrieved value => '{value}'"); + } + } + catch (Exception ex) + { + Console.WriteLine($"Exception handled \n{ex}"); + } + + Console.WriteLine("Sleeping for 25 seconds to allow client to be garbage collected"); + Thread.Sleep(TimeSpan.FromSeconds(25)); + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/ForceFailover.cs b/tests/Console.Tests/ForceFailover.cs new file mode 100644 index 00000000..7ae40136 --- /dev/null +++ b/tests/Console.Tests/ForceFailover.cs @@ -0,0 +1,75 @@ +using System; +using ServiceStack.Logging; +using ServiceStack.Redis; +using ServiceStack.Text; + +namespace ConsoleTests +{ + public class ForceFailover + { + public void Execute() + { + RedisConfig.EnableVerboseLogging = false; + LogManager.LogFactory = new ConsoleLogFactory(debugEnabled:true); + + var sentinel = new RedisSentinel(new [] { + "127.0.0.1:26380", + "127.0.0.1:26381", + "127.0.0.1:26382", + }, "mymaster"); + + var redisManager = sentinel.Start(); + + using (var client = redisManager.GetClient()) + { + client.FlushAll(); + } + + using (var client = redisManager.GetClient()) + { + client.IncrementValue("counter").ToString().Print(); + } + + "Force 'SENTINEL failover mymaster' then press enter...".Print(); + Console.ReadLine(); + + try + { + using (var client = redisManager.GetClient()) + { + client.IncrementValue("counter").ToString().Print(); + } + } + catch (Exception ex) + { + ex.Message.Print(); + } + + try + { + using (var client = redisManager.GetClient()) + { + client.IncrementValue("counter").ToString().Print(); + } + } + catch (Exception ex) + { + ex.Message.Print(); + } + + try + { + using (var client = redisManager.GetClient()) + { + client.IncrementValue("counter").ToString().Print(); + } + } + catch (Exception ex) + { + ex.Message.Print(); + } + + Console.ReadLine(); + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/GoogleRedisSentinelFailoverTests.cs b/tests/Console.Tests/GoogleRedisSentinelFailoverTests.cs new file mode 100644 index 00000000..c2581eb9 --- /dev/null +++ b/tests/Console.Tests/GoogleRedisSentinelFailoverTests.cs @@ -0,0 +1,30 @@ +using ServiceStack.Redis; + +namespace ConsoleTests 
+{ + public class GoogleRedisSentinelFailoverTests : RedisSentinelFailoverTests + { + //gcloud compute instances list + //url: https://cloud.google.com/sdk/gcloud/reference/compute/instances/list + public static string[] SentinelHosts = new[] + { + "130.211.149.172", + "130.211.191.163", + "146.148.61.165", + }; + + protected override RedisSentinel CreateSentinel() + { + var sentinel = new RedisSentinel(SentinelHosts, "master") + { + IpAddressMap = + { + {"10.240.109.243", "130.211.149.172"}, + {"10.240.201.29", "130.211.191.163"}, + {"10.240.200.252", "146.148.61.165"}, + } + }; + return sentinel; + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/HashCollectionStressTests.cs b/tests/Console.Tests/HashCollectionStressTests.cs new file mode 100644 index 00000000..e0a37b2a --- /dev/null +++ b/tests/Console.Tests/HashCollectionStressTests.cs @@ -0,0 +1,289 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using ServiceStack.Redis; +using ServiceStack.Redis.Generic; +using ServiceStack.Text; + +namespace ConsoleTests +{ + public class HashCollectionStressTests + { + private IRedisClientsManager clientsManager; + private RedisCachedCollection redisCollection; + + private int running = 0; + private long writeCount = 0; + private long readCount = 0; + + public void Execute(string ipAddress, int noOfThreads = 64) + { + clientsManager = new PooledRedisClientManager(ipAddress); + + redisCollection = new RedisCachedCollection( + clientsManager, "Threads: " + 64); + + var StartedAt = DateTime.UtcNow; + Interlocked.Increment(ref running); + + "Starting HashCollectionStressTests with {0} threads".Print(noOfThreads); + var threads = new List(); + for (int i = 0; i < noOfThreads; i++) + { + threads.Add(new Thread(WorkerLoop)); + } + threads.ForEach(t => t.Start()); + + "Press Enter to Stop...".Print(); + Console.ReadLine(); + + Interlocked.Decrement(ref running); + + "Writes: {0}, Reads: {1}".Print(writeCount, readCount); + "{0} EndedAt: {1}".Print(GetType().Name, DateTime.UtcNow.ToLongTimeString()); + "{0} TimeTaken: {1}s".Print(GetType().Name, (DateTime.UtcNow - StartedAt).TotalSeconds); + + "\nPress Enter to Quit...".Print(); + Console.ReadLine(); + } + + public void WorkerLoop() + { + while (Interlocked.CompareExchange(ref running, 0, 0) > 0) + { + redisCollection.ContainsKey("key"); + Interlocked.Increment(ref readCount); + + redisCollection["key"] = "value " + readCount; + Interlocked.Increment(ref writeCount); + + var value = redisCollection["key"]; + Interlocked.Increment(ref readCount); + + if (value == null) + Console.WriteLine("value == null"); + } + } + } + + public class RedisCachedCollection : IEnumerable + { + private readonly string collectionKey; + private Func idAction; + private readonly IRedisClientsManager clientsManager; + + public RedisCachedCollection(IRedisClientsManager clientsManager, string collectionKey) + { + this.clientsManager = clientsManager; + this.collectionKey = string.Format("urn:{0}:{1}", "XXXXX", collectionKey); + } + + public IRedisClient RedisConnection + { + get + { + return clientsManager.GetClient(); + } + } + + private IRedisHash GetCollection(IRedisClient redis) + { + var _redisTypedClient = redis.As(); + return _redisTypedClient.GetHash(collectionKey); + } + + public void Add(TValue obj) + { + TKey Id = GetUniqueIdAction(obj); + + RetryAction((redis) => + { + GetCollection(redis).Add(Id, obj); + }); + } + + public bool Remove(TValue obj) + { + TKey Id = GetUniqueIdAction(obj); + TKey 
defaultv = default(TKey); + + return RetryAction((redis) => + { + if (!Id.Equals(defaultv)) + { + { + return GetCollection(redis).Remove(Id); + } + } + return false; + }); + + } + + public TValue this[TKey id] + { + get + { + return RetryAction((redis) => + { + if (GetCollection(redis).ContainsKey(id)) + return GetCollection(redis)[id]; + return default(TValue); + }); + } + set + { + RetryAction((redis) => + { + GetCollection(redis)[id] = value; + }); + } + } + public int Count + { + get + { + return RetryAction((redis) => + { + return GetCollection(redis).Count; + }); + } + } + + public IEnumerable Where(Func predicate) + { + return RetryAction((redis) => + { + return GetCollection(redis).Values.Where(predicate); + }); + } + + public bool Any(Func predicate) + { + return RetryAction((redis) => + { + return GetCollection(redis).Values.Any(predicate); + }); + } + + + public IEnumerator GetEnumerator() + { + return RetryAction>((redis) => + { + return GetCollection(redis).Values.GetEnumerator(); + }); + } + + System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator() + { + return RetryAction(redis => + { + return ((System.Collections.IEnumerable)GetCollection(redis).Values).GetEnumerator(); + }); + + } + + public void Clear() + { + RetryAction((redis) => + { + GetCollection(redis).Clear(); + }); + } + + public bool Contains(TValue obj) + { + TKey Id = GetUniqueIdAction(obj); + return RetryAction((redis) => + { + return GetCollection(redis).ContainsKey(Id); + }); + } + + public bool ContainsKey(TKey obj) + { + return RetryAction((redis) => + { + return GetCollection(redis).ContainsKey(obj); + }); + } + + public void CopyTo(TValue[] array, int arrayIndex) + { + RetryAction((redis) => + { + GetCollection(redis).Values.CopyTo(array, arrayIndex); + }); + } + + public bool IsReadOnly + { + get + { + return RetryAction((redis) => + { + return GetCollection(redis).IsReadOnly; + }); + } + } + + public Func GetUniqueIdAction + { + get + { + return idAction; + } + set + { + idAction = value; + } + } + + private void RetryAction(Action action) + { + try + { + using (var redis = RedisConnection) + { + action(redis); + return; + } + } + catch (Exception ex) + { + Console.WriteLine(ex); + throw; + } + } + + private TOut RetryAction(Func action) + { + int i = 0; + + while (true) + { + try + { + using (var redis = RedisConnection) + { + TOut result = action(redis); + return result; + } + } + catch (Exception) + { + + if (i++ < 3) + { + + continue; + } + + throw; + } + } + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/HashStressTest.cs b/tests/Console.Tests/HashStressTest.cs new file mode 100644 index 00000000..5259a84d --- /dev/null +++ b/tests/Console.Tests/HashStressTest.cs @@ -0,0 +1,131 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using ServiceStack.Redis; +using ServiceStack.Redis.Generic; +using ServiceStack.Text; + +namespace ConsoleTests +{ + class DeviceInfo + { + public Guid PlayerID { get; set; } + public DateTime? LastErrTime { get; set; } + public DateTime? 
LastWarnTime { get; set; } + + protected bool Equals(DeviceInfo other) + { + return PlayerID.Equals(other.PlayerID) + && LastErrTime.Equals(other.LastErrTime) + && LastWarnTime.Equals(other.LastWarnTime); + } + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != this.GetType()) return false; + return Equals((DeviceInfo) obj); + } + + public override int GetHashCode() + { + return base.GetHashCode(); + } + } + + public class HashStressTest + { + public RedisManagerPool redisManager; + private DeviceInfo data = new DeviceInfo + { + PlayerID = new Guid("560531b06bc945b688f3a6a8ade65354"), + LastErrTime = new DateTime(2000, 1, 1), + LastWarnTime = new DateTime(2001, 1, 1), + }; + + private int running = 0; + private string _collectionKey = typeof (HashStressTest).Name; + private TimeSpan? waitBeforeRetry = null; + //private TimeSpan? waitBeforeRetry = TimeSpan.FromMilliseconds(1); + + private long writeCount = 0; + private long readCount = 0; + + public void Execute(string ipAddress, int noOfThreads = 64) + { + redisManager = new RedisManagerPool(new[]{ ipAddress }, new RedisPoolConfig { + MaxPoolSize = noOfThreads + }); + + var StartedAt = DateTime.UtcNow; + Interlocked.Increment(ref running); + + "Starting HashStressTest with {0} threads".Print(noOfThreads); + var threads = new List(); + for (int i = 0; i < noOfThreads; i++) + { + threads.Add(new Thread(WorkerLoop)); + } + threads.ForEach(t => t.Start()); + + "Press Enter to Stop...".Print(); + Console.ReadLine(); + + Interlocked.Decrement(ref running); + + "Writes: {0}, Reads: {1}".Print(writeCount, readCount); + "{0} EndedAt: {1}".Print(GetType().Name, DateTime.UtcNow.ToLongTimeString()); + "{0} TimeTaken: {1}s".Print(GetType().Name, (DateTime.UtcNow - StartedAt).TotalSeconds); + + //Uncomment to wait for all threads to finish + //threads.Each(t => t.Join()); + + "\nPress Enter to Quit...".Print(); + Console.ReadLine(); + } + + public void WorkerLoop() + { + while (Interlocked.CompareExchange(ref running, 0, 0) > 0) + { + using (var client = redisManager.GetClient()) + { + try + { + GetCollection(client)[data.PlayerID] = data; + Interlocked.Increment(ref writeCount); + } + catch (Exception ex) + { + Console.WriteLine("WRITE ERROR: " + ex.Message); + } + + try + { + var readData = GetCollection(client)[data.PlayerID]; + Interlocked.Increment(ref readCount); + + if (!readData.Equals(data)) + { + Console.WriteLine("Data Error: " + readData.Dump()); + } + } + catch (Exception ex) + { + Console.WriteLine("READ ERROR: " + ex.Message); + } + } + + if (waitBeforeRetry != null) + Thread.Sleep(waitBeforeRetry.Value); + } + } + + private IRedisHash GetCollection(IRedisClient redis) + { + var _redisTypedClient = redis.As(); + return _redisTypedClient.GetHash(_collectionKey); + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/LocalRedisSentinelFailoverTests.cs b/tests/Console.Tests/LocalRedisSentinelFailoverTests.cs new file mode 100644 index 00000000..d6caa531 --- /dev/null +++ b/tests/Console.Tests/LocalRedisSentinelFailoverTests.cs @@ -0,0 +1,134 @@ +using System; +using System.Diagnostics; +using System.IO; +using System.Threading; +using ServiceStack; +using ServiceStack.Redis; +using ServiceStack.Text; + +namespace ConsoleTests +{ + public class LocalRedisSentinelFailoverTests : RedisSentinelFailoverTests + { + public static int[] RedisPorts = new[] { 6380, 6381, 6382, }; + + public static string[] SentinelHosts = 
new[] + { + "127.0.0.1:26380", + "127.0.0.1:26381", + "127.0.0.1:26382", + }; + + public bool StartAndStopRedisServers = false; + + private static void StartRedisServersAndSentinels() + { + + log.Debug("Starting all Redis Servers..."); + foreach (var port in RedisPorts) + { + StartRedisServer(port); + } + Thread.Sleep(1500); + + log.Debug("Starting all Sentinels..."); + foreach (var port in RedisPorts) + { + StartRedisSentinel(port); + } + Thread.Sleep(1500); + } + + private static void ShutdownRedisSentinelsAndServers() + { + log.Debug("Shutting down all Sentinels..."); + foreach (var host in SentinelHosts) + { + try + { + var client = new RedisClient(host) + { + ConnectTimeout = 100, + ReceiveTimeout = 100, + }; + client.ShutdownNoSave(); + } + catch (Exception ex) + { + log.Error("Error trying to shutdown {0}".Fmt(host), ex); + } + } + + log.Debug("Shutting down all Redis Servers..."); + foreach (var port in RedisPorts) + { + try + { + var client = new RedisClient("127.0.0.1", port) + { + ConnectTimeout = 100, + ReceiveTimeout = 100, + }; + client.ShutdownNoSave(); + } + catch (Exception ex) + { + "Error trying to shutdown {0}".Print(port); + ex.Message.Print(); + } + } + } + + public static void StartRedisServer(int port) + { + var pInfo = new ProcessStartInfo + { + FileName = new FileInfo(@"..\..\..\..\src\sentinel\redis\redis-server.exe").FullName, + Arguments = new FileInfo(@"..\..\..\..\src\sentinel\redis-{0}\redis.windows.conf".Fmt(port)).FullName, + RedirectStandardError = true, + RedirectStandardOutput = true, + UseShellExecute = false, + CreateNoWindow = true, + }; + + ThreadPool.QueueUserWorkItem(state => Process.Start(pInfo)); + } + + public static void StartRedisSentinel(int port) + { + var pInfo = new ProcessStartInfo + { + FileName = new FileInfo(@"..\..\..\..\src\sentinel\redis\redis-server.exe").FullName, + Arguments = new FileInfo(@"..\..\..\..\src\sentinel\redis-{0}\sentinel.conf".Fmt(port)).FullName + " --sentinel", + RedirectStandardError = true, + RedirectStandardOutput = true, + UseShellExecute = false, + CreateNoWindow = true, + }; + + ThreadPool.QueueUserWorkItem(state => Process.Start(pInfo)); + } + + protected override RedisSentinel CreateSentinel() + { + return new RedisSentinel(SentinelHosts); + } + + protected override void OnSetUp() + { + if (StartAndStopRedisServers) + StartRedisServersAndSentinels(); + } + + protected override void OnTearDown() + { + log.Debug("Press Enter to shutdown Redis Sentinels and Servers..."); + Console.ReadLine(); + if (StartAndStopRedisServers) + ShutdownRedisSentinelsAndServers(); + + Console.ReadLine(); + } + } + +} \ No newline at end of file diff --git a/tests/Console.Tests/LongRunningRedisPubSubServer.cs b/tests/Console.Tests/LongRunningRedisPubSubServer.cs new file mode 100644 index 00000000..7c72849a --- /dev/null +++ b/tests/Console.Tests/LongRunningRedisPubSubServer.cs @@ -0,0 +1,111 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using System.Timers; +using ServiceStack.Redis; +using ServiceStack.Text; +using Timer = System.Timers.Timer; + +namespace ConsoleTests +{ + public class LongRunningRedisPubSubServer + { + private const string Channel = "longrunningtest"; + private static DateTime StartedAt; + + private static long MessagesSent = 0; + private static long HeartbeatsSent = 0; + private static long HeartbeatsReceived = 0; + private static long StartCount = 0; + private static long StopCount = 0; + private static long DisposeCount = 0; + private static long ErrorCount = 0; + private 
static long FailoverCount = 0; + private static long UnSubscribeCount = 0; + + public static RedisManagerPool Manager { get; set; } + public static RedisPubSubServer PubSubServer { get; set; } + + public void Execute(string ipAddress) + { + Manager = new RedisManagerPool(ipAddress); + StartedAt = DateTime.UtcNow; + + var q = new Timer { Interval = 1000 }; + q.Elapsed += OnInterval; + q.Enabled = true; + + using (PubSubServer = new RedisPubSubServer(Manager, Channel) + { + OnStart = () => + { + Console.WriteLine("OnStart: #" + Interlocked.Increment(ref StartCount)); + }, + OnHeartbeatSent = () => + { + Console.WriteLine("OnHeartbeatSent: #" + Interlocked.Increment(ref HeartbeatsSent)); + }, + OnHeartbeatReceived = () => + { + Console.WriteLine("OnHeartbeatReceived: #" + Interlocked.Increment(ref HeartbeatsReceived)); + }, + OnMessage = (channel, msg) => + { + Console.WriteLine("OnMessage: @" + channel + ": " + msg); + }, + OnStop = () => + { + Console.WriteLine("OnStop: #" + Interlocked.Increment(ref StopCount)); + }, + OnError = ex => + { + Console.WriteLine("OnError: #" + Interlocked.Increment(ref ErrorCount) + " ERROR: " + ex); + }, + OnFailover = server => + { + Console.WriteLine("OnFailover: #" + Interlocked.Increment(ref FailoverCount)); + }, + OnDispose = () => + { + Console.WriteLine("OnDispose: #" + Interlocked.Increment(ref DisposeCount)); + }, + OnUnSubscribe = channel => + { + Console.WriteLine("OnUnSubscribe: #" + Interlocked.Increment(ref UnSubscribeCount) + " channel: " + channel); + }, + }) + { + Console.WriteLine("PubSubServer StartedAt: " + StartedAt.ToLongTimeString()); + PubSubServer.Start(); + + "Press Enter to Quit...".Print(); + Console.ReadLine(); + Console.WriteLine("PubSubServer EndedAt: " + DateTime.UtcNow.ToLongTimeString()); + Console.WriteLine("PubSubServer TimeTaken: " + (DateTime.UtcNow - StartedAt).TotalSeconds + "s"); + } + } + + private static void OnInterval(object sender, ElapsedEventArgs e) + { + Task.Factory.StartNew(PublishMessage); + } + + private static void PublishMessage() + { + try + { + var message = "MSG: #" + Interlocked.Increment(ref MessagesSent); + Console.WriteLine("PublishMessage(): " + message); + using (var redis = Manager.GetClient()) + { + redis.PublishMessage(Channel, message); + } + } + catch (Exception ex) + { + Console.WriteLine("ERROR PublishMessage: " + ex); + } + } + + } +} \ No newline at end of file diff --git a/tests/Console.Tests/MasterFailoverWithPassword.cs b/tests/Console.Tests/MasterFailoverWithPassword.cs new file mode 100644 index 00000000..a77b7093 --- /dev/null +++ b/tests/Console.Tests/MasterFailoverWithPassword.cs @@ -0,0 +1,40 @@ +using System; +using System.Threading; +using ServiceStack; +using ServiceStack.Redis; + +namespace ConsoleTests +{ + public class MasterFailoverWithPassword + { + public void Execute() + { + string AddPassword(string host) => $"password@{host}"; + + var sentinelHosts = new[] { "127.0.0.1:26380", "127.0.0.1:26381", "127.0.0.1:26382" }; + var sentinel = new RedisSentinel(sentinelHosts.Map(AddPassword), masterName: "mymaster") { + HostFilter = AddPassword, + SentinelHostFilter = AddPassword, + }; + var manager = sentinel.Start(); + + sentinel.OnWorkerError = Console.WriteLine; + + while (true) + { + try + { + const string RedisKey = "my Name"; + using var client = manager.GetClient(); + var result = client.Get(RedisKey); + Console.WriteLine("Redis Key: {0} \t Port: {1}", result, client.Port); + } + catch (Exception ex) + { + Console.WriteLine("Error {0}".Fmt(ex.Message)); + } + 
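+                // Throttle the loop: wait a few seconds before requesting a client and reading the key again.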
Thread.Sleep(3000); + } + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/MultiBlockingRemoveAfterReconnection.cs b/tests/Console.Tests/MultiBlockingRemoveAfterReconnection.cs new file mode 100644 index 00000000..3abdf40a --- /dev/null +++ b/tests/Console.Tests/MultiBlockingRemoveAfterReconnection.cs @@ -0,0 +1,72 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Logging; +using ServiceStack.Redis; +using ServiceStack.Text; + +namespace ConsoleTests +{ + public class MultiBlockingRemoveAfterReconnection + { + protected internal static RedisManagerPool RedisManager; + + public void Execute() + { +// LogManager.LogFactory = new ConsoleLogFactory(); +// RedisConfig.EnableVerboseLogging = true; + + RedisConfig.DefaultConnectTimeout = 20 * 1000; + RedisConfig.DefaultRetryTimeout = 20 * 1000; + + RedisManager = new RedisManagerPool($"localhost:6379?db=9"); + + MultipleBlocking(3); + + Console.ReadLine(); + } + + private static void MultipleBlocking(int count) + { + for (int i = 0; i < count; i++) + { + var queue = $"Q{i + 1}"; + RunTask(() => BlockingRemoveStartFromList(queue), $"Receive from {queue}"); + } + } + public static void BlockingRemoveStartFromList(string queue) + { + using (var client = RedisManager.GetClient() as RedisClient) + { + client.Ping(); + Console.WriteLine($"#{client.ClientId} Listening to {queue}"); + + var fromList = client.BlockingRemoveStartFromList(queue, TimeSpan.FromHours(10)); + Console.WriteLine($"#{client.ClientId} Received: '{fromList.Dump()}' from '{queue}'"); + } + } + + private static void RunTask(Action action, string name) + { + Task.Run(() => + { + + while (true) + { + try + { + Console.WriteLine($"Invoking {name}"); + action.Invoke(); + } + catch (Exception exception) + { + Console.WriteLine($"Exception in {name}: {exception}"); + //Thread.Sleep(5000);// Give redis some time to wake up! 
+ } + + Thread.Sleep(100); + } + }); + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/NetworkRedisSentinelFailoverTests.cs b/tests/Console.Tests/NetworkRedisSentinelFailoverTests.cs new file mode 100644 index 00000000..27c25a75 --- /dev/null +++ b/tests/Console.Tests/NetworkRedisSentinelFailoverTests.cs @@ -0,0 +1,27 @@ +using System.Collections.Generic; +using ServiceStack.Redis; + +namespace ConsoleTests +{ + public class NetworkRedisSentinelFailoverTests : RedisSentinelFailoverTests + { + public static string[] SentinelHosts = new[] + { + "10.0.0.9:26380", + "10.0.0.9:26381", + "10.0.0.9:26382", + }; + + protected override RedisSentinel CreateSentinel() + { + var sentinel = new RedisSentinel(SentinelHosts) + { + IpAddressMap = + { + {"127.0.0.1", "10.0.0.9"}, + } + }; + return sentinel; + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/Program.cs b/tests/Console.Tests/Program.cs new file mode 100644 index 00000000..2db1835b --- /dev/null +++ b/tests/Console.Tests/Program.cs @@ -0,0 +1,48 @@ +namespace ConsoleTests +{ + public class Incr + { + public long Id { get; set; } + } + + public class IncrResponse + { + public long Result { get; set; } + } + + class Program + { + static void Main(string[] args) + { + //new LongRunningRedisPubSubServer().Execute("10.0.0.9"); + //new HashStressTest().Execute("127.0.0.1"); + //new HashStressTest().Execute("10.0.0.9"); + //new HashCollectionStressTests().Execute("10.0.0.9", noOfThreads: 64); + + //new LocalRedisSentinelFailoverTests + //{ + // StartAndStopRedisServers = true + //}.Execute(); + + //new LocalRedisSentinelFailoverTests { + // UseRedisManagerPool = true, StartAndStopRedisServers = false }.Execute(); + //new LocalRedisSentinelFailoverTests().Execute(); + + //new NetworkRedisSentinelFailoverTests().Execute(); + + //new GoogleRedisSentinelFailoverTests().Execute(); + + //new ForceFailover().Execute(); + + //new BlockingPop().Execute(); + + //new MasterFailoverWithPassword().Execute(); + + //new BlockingRemoveAfterReconnection().Execute(); + + //new MultiBlockingRemoveAfterReconnection().Execute(); + + new DbSelectConnectionStringIssue().Execute(); + } + } +} diff --git a/tests/Console.Tests/RedisSentinelFailoverTests.cs b/tests/Console.Tests/RedisSentinelFailoverTests.cs new file mode 100644 index 00000000..b15bfa88 --- /dev/null +++ b/tests/Console.Tests/RedisSentinelFailoverTests.cs @@ -0,0 +1,126 @@ +using System; +using System.Threading; +using ServiceStack; +using ServiceStack.Logging; +using ServiceStack.Redis; +using ServiceStack.Text; +using Timer = System.Timers.Timer; + +namespace ConsoleTests +{ + /* + * 1. Start all Redis Servers + Sentinels + * 2. Failover the first Sentinel + * 3. 
Kill the current master + */ + public abstract class RedisSentinelFailoverTests + { + protected static ILog log; + + public int MessageInterval = 1000; + + public bool UseRedisManagerPool = false; + + public void Execute() + { + RedisConfig.EnableVerboseLogging = false; + LogManager.LogFactory = new ConsoleLogFactory(debugEnabled: true); + log = LogManager.GetLogger(GetType()); + + RedisConfig.DefaultReceiveTimeout = 10000; + + OnSetUp(); + + using (var sentinel = CreateSentinel()) + { + if (UseRedisManagerPool) + { + sentinel.RedisManagerFactory = (masters, replicas) => + new RedisManagerPool(masters); + } + + var redisManager = sentinel.Start(); + + int i = 0; + var clientTimer = new Timer + { + Interval = MessageInterval, + Enabled = true + }; + clientTimer.Elapsed += (sender, args) => + { + log.Debug("clientTimer.Elapsed: " + (i++)); + + try + { + string key = null; + using (var master = (RedisClient)redisManager.GetClient()) + { + var counter = master.Increment("key", 1); + key = "key" + counter; + log.DebugFormat("Set key {0} in read/write client #{1}@{2}", key, master.Id, master.GetHostString()); + master.SetValue(key, "value" + 1); + } + using (var readOnly = (RedisClient)redisManager.GetReadOnlyClient()) + { + log.DebugFormat("Get key {0} in read-only client #{1}@{2}", key, readOnly.Id, readOnly.GetHostString()); + var value = readOnly.GetValue(key); + log.DebugFormat("{0} = {1}", key, value); + } + } + catch (ObjectDisposedException) + { + log.DebugFormat("ObjectDisposedException detected, disposing timer..."); + clientTimer.Dispose(); + } + catch (Exception ex) + { + log.Error("Error in Timer", ex); + } + + if (i % 10 == 0) + log.Debug(RedisStats.ToDictionary().Dump()); + }; + + log.Debug("Sleeping for 5000ms..."); + Thread.Sleep(5000); + + log.Debug("Failing over master..."); + sentinel.ForceMasterFailover(); + log.Debug("master was failed over"); + + log.Debug("Sleeping for 20000ms..."); + Thread.Sleep(20000); + + try + { + var debugConfig = sentinel.GetMaster(); + using (var master = new RedisClient(debugConfig)) + { + log.Debug("Putting master '{0}' to sleep for 35 seconds...".Fmt(master.GetHostString())); + master.DebugSleep(35); + } + } + catch (Exception ex) + { + log.Error("Error retrieving master for DebugSleep()", ex); + } + + log.Debug("After DEBUG SLEEP... 
Sleeping for 5000ms..."); + Thread.Sleep(5000); + + log.Debug("RedisStats:"); + log.Debug(RedisStats.ToDictionary().Dump()); + + System.Console.ReadLine(); + } + + OnTearDown(); + } + + protected abstract RedisSentinel CreateSentinel(); + + protected virtual void OnSetUp() { } + protected virtual void OnTearDown() { } + } +} \ No newline at end of file diff --git a/tests/Directory.Build.props b/tests/Directory.Build.props new file mode 100644 index 00000000..7eb7717e --- /dev/null +++ b/tests/Directory.Build.props @@ -0,0 +1,29 @@ + + + + 6.0.3 + latest + false + + + + DEBUG + + + + $(DefineConstants);NETFX;NET472 + + + + $(DefineConstants);NETCORE;NETSTANDARD2_0 + + + + $(DefineConstants);NET6_0;NET6_0_OR_GREATER + + + + $(DefineConstants);NETCORE;NETCORE_SUPPORT + + + diff --git a/tests/ServiceStack.Redis.Benchmark/IncrBenchmarks.cs b/tests/ServiceStack.Redis.Benchmark/IncrBenchmarks.cs new file mode 100644 index 00000000..b39aa825 --- /dev/null +++ b/tests/ServiceStack.Redis.Benchmark/IncrBenchmarks.cs @@ -0,0 +1,281 @@ +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Configs; +using BenchmarkDotNet.Jobs; +using BenchmarkDotNet.Order; +using Pipelines.Sockets.Unofficial; +using Respite; +using StackExchange.Redis; +using System.Linq; +using System.Net.Sockets; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Benchmark +{ + [SimpleJob(RuntimeMoniker.Net472)] + [SimpleJob(RuntimeMoniker.NetCoreApp31)] + [MemoryDiagnoser] + [GroupBenchmarksBy(BenchmarkLogicalGroupRule.ByCategory)] + [Orderer(SummaryOrderPolicy.Method, MethodOrderPolicy.Alphabetical)] + [CategoriesColumn] + public class IncrBenchmarks + { + ConnectionMultiplexer _seredis; + IServer _seredis_server; + IDatabase _seredis_db; + RedisClient _ssredis; + IRedisClientAsync _ssAsync; + RespConnection _respite; + + static IncrBenchmarks() + { + RedisClient.NewFactoryFn = () => new RedisClient("127.0.0.1", 6379); + } + + [GlobalSetup] + public Task Setup() => Setup(false); + internal async Task Setup(bool minimal) + { + _ssredis = RedisClient.New(); + _ssAsync = _ssredis; + + if (!minimal) + { + _seredis = await ConnectionMultiplexer.ConnectAsync("127.0.0.1:6379"); + _seredis_server = _seredis.GetServer(_seredis.GetEndPoints().Single()); + _seredis_db = _seredis.GetDatabase(); + + var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); + SocketConnection.SetRecommendedClientOptions(socket); + socket.Connect("127.0.0.1", 6379); + + _respite = RespConnection.Create(socket); + } + } + + [GlobalCleanup] + public async Task Teardown() + { + _seredis?.Dispose(); + _ssredis?.Dispose(); + if (_respite != null) await _respite.DisposeAsync(); + + _seredis_server = null; + _seredis_db = null; + _seredis = null; + _ssredis = null; + _respite = null; + _ssAsync = null; + } + + const string Key = "my_key"; +#if DEBUG + const int PER_TEST = 10; +#else + const int PER_TEST = 1000; +#endif + + [BenchmarkCategory("IncrAsync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public async Task SERedisIncrAsync() + { + long last = default; + await _seredis_db.KeyDeleteAsync(Key); + for (int i = 0; i < PER_TEST; i++) + { + last = await _seredis_db.StringIncrementAsync(Key); + } + return last; + } + + [BenchmarkCategory("IncrSync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public long SERedisIncrSync() + { + long last = default; + _seredis_db.KeyDelete(Key); + for (int i = 0; i < PER_TEST; i++) + { + last = _seredis_db.StringIncrement(Key); + } 
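+            // After the loop, last holds the counter value reached by PER_TEST synchronous increments.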
+ return last; + } + + [BenchmarkCategory("PipelineIncrAsync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public async Task SERedisPipelineIncrAsync() + { + var last = Task.FromResult(0L); + await _seredis_db.KeyDeleteAsync(Key); + var batch = _seredis_db.CreateBatch(); + for (int i = 0; i < PER_TEST; i++) + { + last = batch.StringIncrementAsync(Key); + } + batch.Execute(); + return await last; + } + + [BenchmarkCategory("TransactionIncrAsync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public async Task SERedisTransactionIncrAsync() + { + var last = Task.FromResult(0L); + await _seredis_db.KeyDeleteAsync(Key); + var batch = _seredis_db.CreateTransaction(); + for (int i = 0; i < PER_TEST; i++) + { + last = batch.StringIncrementAsync(Key); + } + await batch.ExecuteAsync(); + return await last; + } + + [BenchmarkCategory("TransactionIncrSync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public async Task SERedisTransactionIncrSync() + { + var last = Task.FromResult(0L); + _seredis_db.KeyDelete(Key); + var batch = _seredis_db.CreateTransaction(); + for (int i = 0; i < PER_TEST; i++) + { + last = batch.StringIncrementAsync(Key); + } + batch.Execute(); + return await last; + } + + [BenchmarkCategory("IncrAsync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public async Task SSRedisIncrAsync() + { + long last = default; + _ssredis.Del(Key); // todo: asyncify + for (int i = 0; i < PER_TEST; i++) + { + last = await _ssAsync.IncrementValueAsync(Key); + } + return last; + } + + + [BenchmarkCategory("IncrSync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public long SSRedisIncrSync() + { + long last = default; + _ssredis.Del(Key); + for (int i = 0; i < PER_TEST; i++) + { + last = _ssredis.IncrementValue(Key); + } + return last; + } + + [BenchmarkCategory("PipelineIncrSync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public long SSRedisPipelineIncrSync() + { + long last = default; + _ssredis.Del(Key); + using var trans = _ssredis.CreatePipeline(); + for (int i = 0; i < PER_TEST; i++) + { + trans.QueueCommand(r => r.IncrementValue(Key), l => last = l); + } + trans.Flush(); + return last; + } + + [BenchmarkCategory("TransactionIncrSync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public long SSRedisTransactionIncrSync() + { + long last = default; + _ssredis.Del(Key); + using var trans = _ssredis.CreateTransaction(); + for (int i = 0; i < PER_TEST; i++) + { + trans.QueueCommand(r => r.IncrementValue(Key), l => last = l); + } + trans.Commit(); + return last; + } + + [BenchmarkCategory("PipelineIncrAsync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public async Task SSRedisPipelineIncrAsync() + { + long last = default; + _ssredis.Del(Key); // todo: asyncify + await using var trans = _ssAsync.CreatePipeline(); + for (int i = 0; i < PER_TEST; i++) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), l => last = l); + } + await trans.FlushAsync(); + return last; + } + + [BenchmarkCategory("TransactionIncrAsync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public async Task SSRedisTransactionIncrAsync() + { + long last = default; + _ssredis.Del(Key); // todo: asyncify + await using var trans = await _ssAsync.CreateTransactionAsync(); + for (int i = 0; i < PER_TEST; i++) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), l => last 
= l); + } + await trans.CommitAsync(); + return last; + } + + + //static readonly RespValue s_Time = RespValue.CreateAggregate( + // RespType.Array, RespValue.Create(RespType.BlobString, "time")); + + //static DateTime ParseTime(in RespValue value) + //{ + // var parts = value.SubItems; + // if (parts.TryGetSingleSpan(out var span)) + // return Parse(span[0], span[1]); + // return Slow(parts); + // static DateTime Slow(in ReadOnlyBlock parts) + // { + // var iter = parts.GetEnumerator(); + // if (!iter.MoveNext()) Throw(); + // var seconds = iter.Current; + // if (!iter.MoveNext()) Throw(); + // var microseconds = iter.Current; + // return Parse(seconds, microseconds); + // static void Throw() => throw new InvalidOperationException(); + // } + + // static DateTime Parse(in RespValue seconds, in RespValue microseconds) + // => Epoch.AddSeconds(seconds.ToInt64()).AddMilliseconds(microseconds.ToInt64() / 1000.0); + //} + //static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc); + + //[BenchmarkCategory("IncrSync")] + //[Benchmark(Description = "Respite", OperationsPerInvoke = PER_TEST)] + //public void RespiteTimeSync() + //{ + // for (int i = 0; i < PER_TEST; i++) + // { + // _respite.Call(s_Time, val => ParseTime(val)); + // } + //} + + //[BenchmarkCategory("IncrAsync")] + //[Benchmark(Description = "Respite", OperationsPerInvoke = PER_TEST)] + //public async Task RespiteTimeAsync() + //{ + // for (int i = 0; i < PER_TEST; i++) + // { + // await _respite.CallAsync(s_Time, val => ParseTime(val)); + // } + //} + } +} diff --git a/tests/ServiceStack.Redis.Benchmark/Program.cs b/tests/ServiceStack.Redis.Benchmark/Program.cs new file mode 100644 index 00000000..84c48100 --- /dev/null +++ b/tests/ServiceStack.Redis.Benchmark/Program.cs @@ -0,0 +1,39 @@ +using BenchmarkDotNet.Running; +using System.Threading.Tasks; +using System; +namespace ServiceStack.Redis.Benchmark +{ + class Program + { +#if DEBUG + static async Task Main() + { + var obj = new IncrBenchmarks(); + try + { + await obj.Setup(false); + + Console.WriteLine(obj.SERedisIncrSync()); + Console.WriteLine(await obj.SERedisIncrAsync()); + Console.WriteLine(await obj.SERedisPipelineIncrAsync()); + Console.WriteLine(await obj.SERedisTransactionIncrAsync()); + Console.WriteLine(await obj.SERedisTransactionIncrSync()); + + Console.WriteLine(obj.SSRedisIncrSync()); + Console.WriteLine(obj.SSRedisPipelineIncrSync()); + Console.WriteLine(obj.SSRedisTransactionIncrSync()); + Console.WriteLine(await obj.SSRedisIncrAsync()); + Console.WriteLine(await obj.SSRedisPipelineIncrAsync()); + Console.WriteLine(await obj.SSRedisTransactionIncrAsync()); + } + finally + { + await obj.Teardown(); + } + } +#else + static void Main(string[] args) + => BenchmarkSwitcher.FromAssembly(typeof(Program).Assembly).Run(args); +#endif + } +} diff --git a/tests/ServiceStack.Redis.Benchmark/ServiceStack.Redis.Benchmark.csproj b/tests/ServiceStack.Redis.Benchmark/ServiceStack.Redis.Benchmark.csproj new file mode 100644 index 00000000..a5bcef26 --- /dev/null +++ b/tests/ServiceStack.Redis.Benchmark/ServiceStack.Redis.Benchmark.csproj @@ -0,0 +1,18 @@ + + + + Exe + net6.0;net472 + 8 + + + + + + + + + + + + diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/NetCoreTestsRunner.cs b/tests/ServiceStack.Redis.Tests.Sentinel/NetCoreTestsRunner.cs new file mode 100644 index 00000000..82e5386b --- /dev/null +++ b/tests/ServiceStack.Redis.Tests.Sentinel/NetCoreTestsRunner.cs @@ -0,0 +1,38 @@ +#if NUNITLITE +using NUnitLite; +using NUnit.Common; +using 
System.Reflection; +using ServiceStack; +using ServiceStack.Text; +using System; +using System.Globalization; +using System.Threading; + +namespace ServiceStack.Redis.Tests.Sentinel +{ + public class NetCoreTestsRunner + { + /// + /// The main program executes the tests. Output may be routed to + /// various locations, depending on the arguments passed. + /// + /// Run with --help for a full list of arguments supported + /// + public static int Main(string[] args) + { + var licenseKey = Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE"); + if (licenseKey.IsNullOrEmpty()) + throw new ArgumentNullException("SERVICESTACK_LICENSE", "Add Environment variable for SERVICESTACK_LICENSE"); + + Licensing.RegisterLicense(licenseKey); + //"ActivatedLicenseFeatures: ".Print(LicenseUtils.ActivatedLicenseFeatures()); + + CultureInfo.DefaultThreadCurrentCulture = new CultureInfo("en-US"); + JsConfig.InitStatics(); + //JsonServiceClient client = new JsonServiceClient(); + var writer = new ExtendedTextWrapper(Console.Out); + return new AutoRun(((IReflectableType)typeof(NetCoreTestsRunner)).GetTypeInfo().Assembly).Execute(args, writer, Console.In); + } + } +} +#endif \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Properties/AssemblyInfo.cs b/tests/ServiceStack.Redis.Tests.Sentinel/Properties/AssemblyInfo.cs similarity index 83% rename from tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Properties/AssemblyInfo.cs rename to tests/ServiceStack.Redis.Tests.Sentinel/Properties/AssemblyInfo.cs index b8e952f8..aa61922b 100644 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Properties/AssemblyInfo.cs +++ b/tests/ServiceStack.Redis.Tests.Sentinel/Properties/AssemblyInfo.cs @@ -5,12 +5,12 @@ // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. 
-[assembly: AssemblyTitle("ServiceStack.Redis")] +[assembly: AssemblyTitle("ServiceStack.Redis.Tests.Sentinel")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("")] -[assembly: AssemblyProduct("ServiceStack.Redis")] -[assembly: AssemblyCopyright("Copyright © 2010")] +[assembly: AssemblyProduct("ServiceStack.Redis.Tests.Sentinel")] +[assembly: AssemblyCopyright("Copyright © 2015")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] @@ -20,7 +20,7 @@ [assembly: ComVisible(false)] // The following GUID is for the ID of the typelib if this project is exposed to COM -[assembly: Guid("70a33fa7-9f81-418d-bb25-6a4be6648ae4")] +[assembly: Guid("d57d78f4-4b0b-40eb-a162-f6146e358294")] // Version information for an assembly consists of the following four values: // diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/Redis3SentinelSetupTests.cs b/tests/ServiceStack.Redis.Tests.Sentinel/Redis3SentinelSetupTests.cs new file mode 100644 index 00000000..6fdee8fb --- /dev/null +++ b/tests/ServiceStack.Redis.Tests.Sentinel/Redis3SentinelSetupTests.cs @@ -0,0 +1,76 @@ +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests.Sentinel +{ + [TestFixture, Category("Integration")] + [Ignore("Requires cloud setup")] + public class Redis3SentinelSetupTests + : RedisSentinelTestBase + { + [Test] + public void Can_connect_to_3SentinelSetup() + { + var sentinel = new RedisSentinel(SentinelHosts); + + var redisManager = sentinel.Start(); + + using (var client = redisManager.GetClient()) + { + client.FlushAll(); + + client.SetValue("Sentinel3Setup", "IntranetSentinel"); + + var result = client.GetValue("Sentinel3Setup"); + Assert.That(result, Is.EqualTo("IntranetSentinel")); + } + } + + [Test] + public void Can_connect_directly_to_Redis_Instances() + { + foreach (var host in GoogleCloudSentinelHosts) + { + using (var client = new RedisClient(host, 6379)) + { + "{0}:6379".Print(host); + client.Info.PrintDump(); + } + + using (var sentinel = new RedisClient(host, 26379)) + { + "{0}:26379".Print(host); + sentinel.Info.PrintDump(); + } + } + } + + [Test] + public void Can_connect_to_GoogleCloud_3SentinelSetup() + { + var sentinel = CreateGCloudSentinel(); + + var redisManager = sentinel.Start(); + + using (var client = redisManager.GetClient()) + { + "{0}:{1}".Print(client.Host, client.Port); + + client.FlushAll(); + + client.SetValue("Sentinel3Setup", "GoogleCloud"); + + var result = client.GetValue("Sentinel3Setup"); + Assert.That(result, Is.EqualTo("GoogleCloud")); + } + + using (var readOnly = redisManager.GetReadOnlyClient()) + { + "{0}:{1}".Print(readOnly.Host, readOnly.Port); + + var result = readOnly.GetValue("Sentinel3Setup"); + Assert.That(result, Is.EqualTo("GoogleCloud")); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/RedisResolverTests.cs b/tests/ServiceStack.Redis.Tests.Sentinel/RedisResolverTests.cs new file mode 100644 index 00000000..ee8dddc3 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests.Sentinel/RedisResolverTests.cs @@ -0,0 +1,292 @@ +using System; +using System.Collections.Generic; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests.Sentinel +{ + [TestFixture] + public class RedisResolverTests + : RedisSentinelTestBase + { + [OneTimeSetUp] + public void OneTimeSetUp() + { + StartAllRedisServers(); + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { + ShutdownAllRedisServers(); + } + + [Test] + public void 
RedisResolver_does_reset_when_detects_invalid_master() + { + var invalidMaster = new[] { ReplicaHosts[0] }; + var invalidReplicas = new[] { MasterHosts[0], ReplicaHosts[1] }; + + using (var redisManager = new PooledRedisClientManager(invalidMaster, invalidReplicas)) + { + var resolver = (RedisResolver)redisManager.RedisResolver; + + using (var master = redisManager.GetClient()) + { + master.SetValue("KEY", "1"); + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + } + using (var master = redisManager.GetClient()) + { + master.Increment("KEY", 1); + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + } + + "Masters:".Print(); + resolver.Masters.PrintDump(); + "Replicas:".Print(); + resolver.Slaves.PrintDump(); + } + } + + [Test] + public void PooledRedisClientManager_alternates_hosts() + { + using var redisManager = new PooledRedisClientManager(MasterHosts, ReplicaHosts); + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.SetValue("KEY", "1"); + } + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.Increment("KEY", 1); + } + + 5.Times(i => { + using var readOnly = redisManager.GetReadOnlyClient(); + Assert.That(readOnly.GetHostString(), Is.EqualTo(ReplicaHosts[i % ReplicaHosts.Length])); + Assert.That(readOnly.GetValue("KEY"), Is.EqualTo("2")); + }); + + using (var cache = redisManager.GetCacheClient()) + { + Assert.That(cache.Get<string>("KEY"), Is.EqualTo("2")); + } + } + + [Test] + public void RedisManagerPool_alternates_hosts() + { + using var redisManager = new RedisManagerPool(MasterHosts); + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.SetValue("KEY", "1"); + } + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.Increment("KEY", 1); + } + + 5.Times(i => { + using var readOnly = redisManager.GetReadOnlyClient(); + Assert.That(readOnly.GetHostString(), Is.EqualTo(MasterHosts[0])); + Assert.That(readOnly.GetValue("KEY"), Is.EqualTo("2")); + }); + + using (var cache = redisManager.GetCacheClient()) + { + Assert.That(cache.Get<string>("KEY"), Is.EqualTo("2")); + } + } + + [Test] + public void BasicRedisClientManager_alternates_hosts() + { + using (var redisManager = new BasicRedisClientManager(MasterHosts, ReplicaHosts)) + { + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.SetValue("KEY", "1"); + } + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.Increment("KEY", 1); + } + + 5.Times(i => { + using var readOnly = redisManager.GetReadOnlyClient(); + Assert.That(readOnly.GetHostString(), Is.EqualTo(ReplicaHosts[i % ReplicaHosts.Length])); + Assert.That(readOnly.GetValue("KEY"), Is.EqualTo("2")); + }); + + using (var cache = redisManager.GetCacheClient()) + { + Assert.That(cache.Get<string>("KEY"), Is.EqualTo("2")); + } + } + } + + public class FixedResolver : IRedisResolver + { + private readonly RedisEndpoint master; + private readonly RedisEndpoint replica; + public int NewClientsInitialized = 0; + + public FixedResolver(RedisEndpoint master, RedisEndpoint replica) + { + this.master = master; + this.replica = replica; + this.ClientFactory = RedisConfig.ClientFactory; + } + + public Func<RedisEndpoint, RedisClient> ClientFactory { get; set; } +
public int ReadWriteHostsCount => 1; + public int ReadOnlyHostsCount => 1; + + public void ResetMasters(IEnumerable hosts) { } + public void ResetSlaves(IEnumerable hosts) { } + + public RedisClient CreateRedisClient(RedisEndpoint config, bool master) + { + NewClientsInitialized++; + return ClientFactory(config); + } + + public RedisClient CreateMasterClient(int desiredIndex) + { + return CreateRedisClient(master, master: true); + } + + public RedisClient CreateSlaveClient(int desiredIndex) + { + return CreateRedisClient(replica, master: false); + } + } + + [Test] + public void PooledRedisClientManager_can_execute_CustomResolver() + { + var resolver = new FixedResolver(MasterHosts[0].ToRedisEndpoint(), ReplicaHosts[0].ToRedisEndpoint()); + using var redisManager = new PooledRedisClientManager("127.0.0.1:8888") + { + RedisResolver = resolver + }; + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.SetValue("KEY", "1"); + } + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.Increment("KEY", 1); + } + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(1)); + + 5.Times(i => + { + using (var replica = redisManager.GetReadOnlyClient()) + { + Assert.That(replica.GetHostString(), Is.EqualTo(ReplicaHosts[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("2")); + } + }); + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(2)); + + redisManager.FailoverTo("127.0.0.1:9999", "127.0.0.1:9999"); + + 5.Times(i => + { + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + Assert.That(master.GetValue("KEY"), Is.EqualTo("2")); + } + using (var replica = redisManager.GetReadOnlyClient()) + { + Assert.That(replica.GetHostString(), Is.EqualTo(ReplicaHosts[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("2")); + } + }); + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(4)); + } + + [Test] + public void RedisManagerPool_can_execute_CustomResolver() + { + var resolver = new FixedResolver(MasterHosts[0].ToRedisEndpoint(), ReplicaHosts[0].ToRedisEndpoint()); + using var redisManager = new RedisManagerPool("127.0.0.1:8888") + { + RedisResolver = resolver + }; + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.SetValue("KEY", "1"); + } + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.Increment("KEY", 1); + } + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(1)); + + 5.Times(i => { + using var replica = redisManager.GetReadOnlyClient(); + Assert.That(replica.GetHostString(), Is.EqualTo(MasterHosts[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("2")); + }); + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(1)); + + redisManager.FailoverTo("127.0.0.1:9999", "127.0.0.1:9999"); + + 5.Times(i => + { + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + Assert.That(master.GetValue("KEY"), Is.EqualTo("2")); + } + using (var replica = redisManager.GetReadOnlyClient()) + { + Assert.That(replica.GetHostString(), Is.EqualTo(MasterHosts[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("2")); + } + }); + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(2)); + } + + private static void InitializeEmptyRedisManagers(IRedisClientsManager 
redisManager, string[] masters, string[] replicas) + { + var hasResolver = (IHasRedisResolver)redisManager; + hasResolver.RedisResolver.ResetMasters(masters); + hasResolver.RedisResolver.ResetSlaves(replicas); + + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(masters[0])); + master.SetValue("KEY", "1"); + } + using (var replica = redisManager.GetReadOnlyClient()) + { + Assert.That(replica.GetHostString(), Is.EqualTo(replicas[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("1")); + } + } + + [Test] + public void Can_initialize_ClientManagers_with_no_hosts() + { + InitializeEmptyRedisManagers(new PooledRedisClientManager(), MasterHosts, ReplicaHosts); + InitializeEmptyRedisManagers(new RedisManagerPool(), MasterHosts, MasterHosts); + InitializeEmptyRedisManagers(new BasicRedisClientManager(), MasterHosts, ReplicaHosts); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelConnectionTests.cs b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelConnectionTests.cs new file mode 100644 index 00000000..a90f92b4 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelConnectionTests.cs @@ -0,0 +1,49 @@ +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests.Sentinel +{ + public class RedisSentinelConnectionTests + { + [Test] + public void Can_connect_to_AWS_Redis_Sentinel_SentinelMaster() + { + RedisConfig.AssumeServerVersion = 4000; + + var client = new RedisClient("52.7.181.87", 26379); + + var info = client.SentinelMaster("mymaster"); + + info.PrintDump(); + } + + [Test] + public void Can_connect_to_AWS_Redis_Sentinel_Ping() + { + RedisConfig.AssumeServerVersion = 4000; + + var client = new RedisClient("52.7.181.87", 26379); + + Assert.That(client.Ping()); + } + + [Test] + public void Can_connect_to_RedisSentinel() + { + RedisConfig.AssumeServerVersion = 4000; + + var sentinel = new RedisSentinel("52.7.181.87:26379") { + IpAddressMap = { + {"127.0.0.1", "52.7.181.87"} + } + }; + + var redisManager = sentinel.Start(); + + using (var client = redisManager.GetClient()) + { + Assert.That(client.Ping()); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTestBase.cs b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTestBase.cs new file mode 100644 index 00000000..5a176a85 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTestBase.cs @@ -0,0 +1,208 @@ +using System; +using System.Diagnostics; +using System.IO; +using System.Threading; +using ServiceStack.Text; +#if NETCORE +using System.Runtime.InteropServices; +#endif + +namespace ServiceStack.Redis.Tests.Sentinel +{ + public abstract class RedisSentinelTestBase + { + public static bool DisableLocalServers = false; + + public const string MasterName = "mymaster"; + public const string GCloudMasterName = "master"; + + public static string[] MasterHosts = new[] + { + "127.0.0.1:6380", + }; + + public static string[] ReplicaHosts = new[] + { + "127.0.0.1:6381", + "127.0.0.1:6382", + }; + + public static string[] SentinelHosts = new[] + { + "127.0.0.1:26380", + "127.0.0.1:26381", + "127.0.0.1:26382", + }; + + public static int[] RedisPorts = new[] + { + 6380, + 6381, + 6382, + }; + + public static int[] SentinelPorts = new[] + { + 26380, + 26381, + 26382, + }; + + public static string[] GoogleCloudSentinelHosts = new[] + { + "146.148.77.31", + "130.211.139.141", + "107.178.218.53", + }; + + public static 
RedisSentinel CreateSentinel() + { + var sentinel = new RedisSentinel(SentinelHosts); + return sentinel; + } + + public static RedisSentinel CreateGCloudSentinel() + { + var sentinel = new RedisSentinel(GoogleCloudSentinelHosts, masterName: "master") + { + IpAddressMap = + { + {"10.240.34.152", "146.148.77.31"}, + {"10.240.203.193", "130.211.139.141"}, + {"10.240.209.52", "107.178.218.53"}, + } + }; + return sentinel; + } + + public static void StartRedisServer(int port) + { + var exePath = new FileInfo("~/../../src/sentinel/redis/redis-server.exe".MapProjectPath()).FullName; +#if NETCORE + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + exePath = "redis-server"; +#endif + var configDir = "~/../../src/sentinel/redis-{0}/".Fmt(port).MapProjectPath(); + var configPath = Path.Combine(configDir, "redis.conf"); + + File.WriteAllText(configPath, + File.ReadAllText(Path.Combine(configDir,"redis.windows.conf")).Replace( + @"C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-{0}".Fmt(port), + configDir.Replace(@"\", @"\\") + ) + ); + + var pInfo = new ProcessStartInfo + { + FileName = exePath, + Arguments = new FileInfo(configPath).FullName, + RedirectStandardError = true, + RedirectStandardOutput = true, + UseShellExecute = false, + CreateNoWindow = true, + }; + + var result = Process.Start(pInfo); + + ThreadPool.QueueUserWorkItem(state => Process.Start(pInfo)); + } + + public static void StartRedisSentinel(int port) + { + var exePath = new FileInfo("~/../../src/sentinel/redis/redis-server.exe".MapProjectPath()).FullName; +#if NETCORE + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + exePath = "redis-server"; +#endif + var configDir = "~/../../src/sentinel/redis-{0}/".Fmt(port).MapProjectPath(); + var configPath = Path.Combine(configDir, "redis.sentinel.conf"); + + File.WriteAllText(configPath, + File.ReadAllText(Path.Combine(configDir,"sentinel.conf")).Replace( + @"C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-{0}".Fmt(port), + configDir.Replace(@"\", @"\\") + ) + ); + + + var pInfo = new ProcessStartInfo + { + FileName = exePath, + Arguments = new FileInfo(configPath).FullName + " --sentinel", + RedirectStandardError = true, + RedirectStandardOutput = true, + UseShellExecute = false, + CreateNoWindow = true, + }; + + ThreadPool.QueueUserWorkItem(state => Process.Start(pInfo)); + } + + public static void StartAllRedisServers(int waitMs = 1500) + { + if (DisableLocalServers) + return; + + foreach (var port in RedisPorts) + { + StartRedisServer(port); + } + if (waitMs > 0) + Thread.Sleep(waitMs); + } + + public static void StartAllRedisSentinels(int waitMs = 1500) + { + if (DisableLocalServers) + return; + + foreach (var port in RedisPorts) + { + StartRedisSentinel(port); + } + if (waitMs > 0) + Thread.Sleep(waitMs); + } + + public static void ShutdownAllRedisServers() + { + if (DisableLocalServers) + return; + + foreach (var port in RedisPorts) + { + try + { + var client = new RedisClient("127.0.0.1", port); + client.ShutdownNoSave(); + } + catch (Exception ex) + { + "Error trying to shutdown {0}".Print(port); + ex.Message.Print(); + } + } + } + + public static void ShutdownAllRedisSentinels() + { + if (DisableLocalServers) + return; + + foreach (var port in SentinelPorts) + { + try + { + var client = new RedisClient("127.0.0.1", port); + client.ShutdownNoSave(); + } + catch (Exception ex) + { + "Error trying to shutdown {0}".Print(port); + ex.Message.Print(); + } + } + } + + } +} \ No newline at end of file diff --git 
a/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTests.cs b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTests.cs new file mode 100644 index 00000000..4302719d --- /dev/null +++ b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTests.cs @@ -0,0 +1,193 @@ +using System; +using System.Threading; +using NUnit.Framework; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests.Sentinel +{ + [TestFixture, Category("Integration")] + public class RedisSentinelTests + : RedisSentinelTestBase + { + [OneTimeSetUp] + public void OnBeforeTestFixture() + { + StartAllRedisServers(); + StartAllRedisSentinels(); + LogManager.LogFactory = new ConsoleLogFactory(debugEnabled:true); + } + + [OneTimeTearDown] + public void OnAfterTestFixture() + { + ShutdownAllRedisSentinels(); + ShutdownAllRedisServers(); + } + + protected RedisClient RedisSentinel; + + [SetUp] + public void OnBeforeEachTest() + { + var parts = SentinelHosts[0].SplitOnFirst(':'); + RedisSentinel = new RedisClient(parts[0], int.Parse(parts[1])); + } + + [TearDown] + public void OnAfterEachTest() + { + RedisSentinel.Dispose(); + } + + [Test] + public void Can_Ping_Sentinel() + { + Assert.True(RedisSentinel.Ping()); + } + + [Test] + public void Can_Get_Sentinel_Masters() + { + var masters = RedisSentinel.SentinelMasters(); + masters.PrintDump(); + + Assert.That(masters.Count, Is.GreaterThan(0)); + } + + [Test] + public void Can_Get_Sentinel_Master() + { + var master = RedisSentinel.SentinelMaster(MasterName); + master.PrintDump(); + + var host = "{0}:{1}".Fmt(master["ip"], master["port"]); + Assert.That(master["name"], Is.EqualTo(MasterName)); + Assert.That(host, Is.EqualTo(MasterHosts[0])); + } + + [Test] + public void Can_Get_Sentinel_Replicas() + { + var replicas = RedisSentinel.SentinelSlaves(MasterName); + replicas.PrintDump(); + + Assert.That(replicas.Count, Is.GreaterThan(0)); + } + + [Test] + public void Can_Get_Sentinel_Sentinels() + { + var sentinels = RedisSentinel.SentinelSentinels(MasterName); + sentinels.PrintDump(); + + Assert.That(sentinels.Count, Is.GreaterThan(0)); + } + + [Test] + public void Can_Get_Master_Addr() + { + var addr = RedisSentinel.SentinelGetMasterAddrByName(MasterName); + + string host = addr[0]; + string port = addr[1]; + var hostString = "{0}:{1}".Fmt(host, port); + + // IP of localhost + Assert.That(hostString, Is.EqualTo(MasterHosts[0])); + } + + [Test] + public void Does_scan_for_other_active_sentinels() + { + using var sentinel = new RedisSentinel(SentinelHosts[0]) { + ScanForOtherSentinels = true + }; + var clientsManager = sentinel.Start(); + + Assert.That(sentinel.SentinelHosts, Is.EquivalentTo(SentinelHosts)); + + using var client = clientsManager.GetClient(); + Assert.That(client.GetHostString(), Is.EqualTo(MasterHosts[0])); + } + + [Test] + public void Can_Get_Redis_ClientsManager() + { + using var sentinel = CreateSentinel(); + var clientsManager = sentinel.Start(); + using var client = clientsManager.GetClient(); + Assert.That(client.GetHostString(), Is.EqualTo(MasterHosts[0])); + } + + [Test] + public void Can_specify_Timeout_on_RedisManager() + { + using var sentinel = CreateSentinel(); + sentinel.RedisManagerFactory = (masters, replicas) => new PooledRedisClientManager(masters, replicas) { IdleTimeOutSecs = 20 }; + + using var clientsManager = (PooledRedisClientManager)sentinel.Start(); + using var client = clientsManager.GetClient(); + Assert.That(clientsManager.IdleTimeOutSecs, Is.EqualTo(20)); + 
Assert.That(((RedisNativeClient)client).IdleTimeOutSecs, Is.EqualTo(20)); + } + + [Test] + public void Can_specify_db_on_RedisSentinel() + { + using var sentinel = CreateSentinel(); + sentinel.HostFilter = host => "{0}?db=1".Fmt(host); + + using var clientsManager = sentinel.Start(); + using var client = clientsManager.GetClient(); + Assert.That(client.Db, Is.EqualTo(1)); + } + + [Test] + [Ignore("Long running test")] + public void Run_sentinel_for_10_minutes() + { + ILog log = LogManager.GetLogger(GetType()); + + using (var sentinel = CreateSentinel()) + { + sentinel.OnFailover = manager => "Redis Managers Failed Over to new hosts".Print(); + sentinel.OnWorkerError = ex => "Worker error: {0}".Print(ex); + sentinel.OnSentinelMessageReceived = (channel, msg) => "Received '{0}' on channel '{1}' from Sentinel".Print(channel, msg); + + using (var redisManager = sentinel.Start()) + { + var aTimer = new Timer((state) => + { + "Incrementing key".Print(); + + string key = null; + using (var redis = redisManager.GetClient()) + { + var counter = redis.Increment("key", 1); + key = "key" + counter; + log.InfoFormat("Set key {0} in read/write client", key); + redis.SetValue(key, "value" + 1); + } + + using (var redis = redisManager.GetClient()) + { + log.InfoFormat("Get key {0} in read-only client...", key); + var value = redis.GetValue(key); + log.InfoFormat("{0} = {1}", key, value); + } + }, null, 0, 1000); + } + } + + Thread.Sleep(TimeSpan.FromMinutes(10)); + } + + [Test] + public void Defaults_to_default_sentinel_port() + { + var sentinelEndpoint = "127.0.0.1".ToRedisEndpoint(defaultPort: RedisConfig.DefaultPortSentinel); + Assert.That(sentinelEndpoint.Port, Is.EqualTo(RedisConfig.DefaultPortSentinel)); + } + } +} diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/ServiceStack.Redis.Tests.Sentinel.csproj b/tests/ServiceStack.Redis.Tests.Sentinel/ServiceStack.Redis.Tests.Sentinel.csproj new file mode 100644 index 00000000..2d59d514 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests.Sentinel/ServiceStack.Redis.Tests.Sentinel.csproj @@ -0,0 +1,51 @@ + + + + net472;net6.0 + portable + ServiceStack.Redis.Tests.Sentinel + Library + ServiceStack.Redis.Tests.Sentinel + false + false + false + false + false + false + false + false + + + + + + + + + + + + + + + $(DefineConstants);NET45 + + + + + + + + + + + + + + $(DefineConstants);NETCORE;NET6_0;NET6_0_OR_GREATER + + + + + + diff --git a/tests/ServiceStack.Redis.Tests/AdhocClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/AdhocClientTests.Async.cs new file mode 100644 index 00000000..0bbf678f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/AdhocClientTests.Async.cs @@ -0,0 +1,25 @@ +using NUnit.Framework; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class AdhocClientTestsAsync + { + [Test] + public async Task Search_Test() + { + await using var client = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + + const string cacheKey = "urn+metadata:All:SearchProProfiles?SwanShinichi Osawa /0/8,0,0,0"; + const long value = 1L; + await client.SetAsync(cacheKey, value); + var result = await client.GetAsync(cacheKey); + + Assert.That(result, Is.EqualTo(value)); + } + + // remaining tests from parent do not touch redis + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/AdhocClientTests.cs b/tests/ServiceStack.Redis.Tests/AdhocClientTests.cs index 0c8236e6..254f1668 100644 --- a/tests/ServiceStack.Redis.Tests/AdhocClientTests.cs +++ 
b/tests/ServiceStack.Redis.Tests/AdhocClientTests.cs @@ -2,28 +2,26 @@ using System.Diagnostics; using System.Security.Cryptography; using NUnit.Framework; -using ServiceStack.Common.Utils; using System.Text; -using ServiceStack.Text; namespace ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class AdhocClientTests - { - [Test] - public void Search_Test() - { - using (var client = new RedisClient(TestConfig.SingleHost)) - { - const string cacheKey = "urn+metadata:All:SearchProProfiles?SwanShinichi Osawa /0/8,0,0,0"; - const long value = 1L; - client.Set(cacheKey, value); - var result = client.Get(cacheKey); - - Assert.That(result, Is.EqualTo(value)); - } - } + [TestFixture, Category("Integration")] + public class AdhocClientTests + { + [Test] + public void Search_Test() + { + using (var client = new RedisClient(TestConfig.SingleHost)) + { + const string cacheKey = "urn+metadata:All:SearchProProfiles?SwanShinichi Osawa /0/8,0,0,0"; + const long value = 1L; + client.Set(cacheKey, value); + var result = client.Get(cacheKey); + + Assert.That(result, Is.EqualTo(value)); + } + } public string CalculateMD5Hash(string input) { @@ -31,7 +29,7 @@ public string CalculateMD5Hash(string input) var md5 = MD5.Create(); byte[] inputBytes = Encoding.ASCII.GetBytes(input); byte[] hash = md5.ComputeHash(inputBytes); - + // step 2, convert byte array to hex string var sb = new StringBuilder(); for (int i = 0; i < hash.Length; i++) @@ -41,69 +39,68 @@ public string CalculateMD5Hash(string input) return sb.ToString(); } - [Test] - public void Can_infer_utf8_bytes() - { - var cmd = "GET" + 2 + "\r\n"; - var cmdBytes = System.Text.Encoding.UTF8.GetBytes(cmd); + [Test] + public void Can_infer_utf8_bytes() + { + var cmd = "GET" + 2 + "\r\n"; + var cmdBytes = System.Text.Encoding.UTF8.GetBytes(cmd); - var hex = BitConverter.ToString(cmdBytes); + var hex = BitConverter.ToString(cmdBytes); - Debug.WriteLine(hex); + Debug.WriteLine(hex); - Debug.WriteLine(BitConverter.ToString("G".ToUtf8Bytes())); - Debug.WriteLine(BitConverter.ToString("E".ToUtf8Bytes())); - Debug.WriteLine(BitConverter.ToString("T".ToUtf8Bytes())); - Debug.WriteLine(BitConverter.ToString("2".ToUtf8Bytes())); - Debug.WriteLine(BitConverter.ToString("\r".ToUtf8Bytes())); - Debug.WriteLine(BitConverter.ToString("\n".ToUtf8Bytes())); + Debug.WriteLine(BitConverter.ToString("G".ToUtf8Bytes())); + Debug.WriteLine(BitConverter.ToString("E".ToUtf8Bytes())); + Debug.WriteLine(BitConverter.ToString("T".ToUtf8Bytes())); + Debug.WriteLine(BitConverter.ToString("2".ToUtf8Bytes())); + Debug.WriteLine(BitConverter.ToString("\r".ToUtf8Bytes())); + Debug.WriteLine(BitConverter.ToString("\n".ToUtf8Bytes())); - var bytes = new[] { (byte)'\r', (byte)'\n', (byte)'0', (byte)'9', }; - Debug.WriteLine(BitConverter.ToString(bytes)); - } + var bytes = new[] { (byte)'\r', (byte)'\n', (byte)'0', (byte)'9', }; + Debug.WriteLine(BitConverter.ToString(bytes)); + } - [Test] - public void Convert_int() - { - var test = 1234; - Debug.WriteLine(BitConverter.ToString(1234.ToString().ToUtf8Bytes())); - } + [Test] + public void Convert_int() + { + Debug.WriteLine(BitConverter.ToString(1234.ToString().ToUtf8Bytes())); + } - private static byte[] GetCmdBytes1(char cmdPrefix, int noOfLines) - { - var cmd = cmdPrefix.ToString() + noOfLines.ToString() + "\r\n"; - return cmd.ToUtf8Bytes(); - } + private static byte[] GetCmdBytes1(char cmdPrefix, int noOfLines) + { + var cmd = cmdPrefix.ToString() + noOfLines.ToString() + "\r\n"; + return cmd.ToUtf8Bytes(); + } - 
private static byte[] GetCmdBytes2(char cmdPrefix, int noOfLines) - { - var strLines = noOfLines.ToString(); - var cmdBytes = new byte[1 + strLines.Length + 2]; - cmdBytes[0] = (byte)cmdPrefix; + private static byte[] GetCmdBytes2(char cmdPrefix, int noOfLines) + { + var strLines = noOfLines.ToString(); + var cmdBytes = new byte[1 + strLines.Length + 2]; + cmdBytes[0] = (byte)cmdPrefix; - for (var i = 0; i < strLines.Length; i++) - cmdBytes[i + 1] = (byte)strLines[i]; + for (var i = 0; i < strLines.Length; i++) + cmdBytes[i + 1] = (byte)strLines[i]; - cmdBytes[cmdBytes.Length - 2] = 0x0D; // \r - cmdBytes[cmdBytes.Length - 1] = 0x0A; // \n + cmdBytes[cmdBytes.Length - 2] = 0x0D; // \r + cmdBytes[cmdBytes.Length - 1] = 0x0A; // \n - return cmdBytes; - } + return cmdBytes; + } - [Test] - public void Compare_GetCmdBytes() - { - var res1 = GetCmdBytes1('$', 1234); - var res2 = GetCmdBytes2('$', 1234); + [Test] + public void Compare_GetCmdBytes() + { + var res1 = GetCmdBytes1('$', 1234); + var res2 = GetCmdBytes2('$', 1234); - Debug.WriteLine(BitConverter.ToString(res1)); - Debug.WriteLine(BitConverter.ToString(res2)); + Debug.WriteLine(BitConverter.ToString(res1)); + Debug.WriteLine(BitConverter.ToString(res2)); - var ticks1 = PerfUtils.Measure(1000000, () => GetCmdBytes1('$', 2)); - var ticks2 = PerfUtils.Measure(1000000, () => GetCmdBytes2('$', 2)); + var ticks1 = PerfUtils.Measure(() => GetCmdBytes1('$', 2)); + var ticks2 = PerfUtils.Measure(() => GetCmdBytes2('$', 2)); - Debug.WriteLine(String.Format("{0} : {1} = {2}", ticks1, ticks2, ticks1 / (double)ticks2)); - } + Debug.WriteLine(String.Format("{0} : {1} = {2}", ticks1, ticks2, ticks1 / (double)ticks2)); + } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/App.config b/tests/ServiceStack.Redis.Tests/App.config new file mode 100644 index 00000000..c39009c1 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/App.config @@ -0,0 +1,6 @@ + + + + + + diff --git a/tests/ServiceStack.Redis.Tests/AsyncImplementationsTests.Async.cs b/tests/ServiceStack.Redis.Tests/AsyncImplementationsTests.Async.cs new file mode 100644 index 00000000..3b835c38 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/AsyncImplementationsTests.Async.cs @@ -0,0 +1,882 @@ +// Copyright (c) Service Stack LLC. All Rights Reserved. 
+// License: https://raw.github.com/ServiceStack/ServiceStack/master/license.txt + +using NUnit.Framework; +using ServiceStack.Caching; +using ServiceStack.Data; +using ServiceStack.Model; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Redis.Support.Locking; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Drawing.Text; +using System.Globalization; +using System.Linq; +using System.Linq.Expressions; +using System.Reflection; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + // verify that anything that implements IFoo also implements IFooAsync + [Category("Async")] + public class AsyncImplementationTests + { + private static readonly Type[] AllTypes + = typeof(RedisClient).Assembly.GetTypes() + .Concat(typeof(AsyncImplementationTests).Assembly.GetTypes()) + .Where(x => x.IsClass) + .OrderBy(x => x.FullName).ToArray(); + + private string Log(string message) + { + TestContext.Out.WriteLine(message); + return message; + } + + [TestCase(typeof(ICacheClient), typeof(ICacheClientAsync))] + [TestCase(typeof(IEntityStore), typeof(IEntityStoreAsync))] + [TestCase(typeof(IEntityStore<>), typeof(IEntityStoreAsync<>))] + [TestCase(typeof(IRedisClient), typeof(IRedisClientAsync))] + + [TestCase(typeof(IRedisClientsManager), typeof(IRedisClientsManagerAsync))] + [TestCase(typeof(IRedisNativeClient), typeof(IRedisNativeClientAsync))] + [TestCase(typeof(IRedisPipeline), typeof(IRedisPipelineAsync))] + [TestCase(typeof(IRedisPipelineShared), typeof(IRedisPipelineSharedAsync))] + [TestCase(typeof(IRedisQueueableOperation), typeof(IRedisQueueableOperationAsync))] + + [TestCase(typeof(IRedisQueueCompletableOperation), typeof(IRedisQueueCompletableOperationAsync))] + [TestCase(typeof(IRedisTransaction), typeof(IRedisTransactionAsync))] + [TestCase(typeof(IRedisTransactionBase), typeof(IRedisTransactionBaseAsync))] + [TestCase(typeof(IRedisTypedClient<>), typeof(IRedisTypedClientAsync<>))] + [TestCase(typeof(IRemoveByPattern), typeof(IRemoveByPatternAsync))] + + [TestCase(typeof(IDistributedLock), typeof(IDistributedLockAsync))] + [TestCase(typeof(IRedisSubscription), typeof(IRedisSubscriptionAsync))] + [TestCase(typeof(IRedisHash), typeof(IRedisHashAsync))] + [TestCase(typeof(IRedisSortedSet), typeof(IRedisSortedSetAsync))] + [TestCase(typeof(IRedisSet), typeof(IRedisSetAsync))] + + [TestCase(typeof(IRedisList), typeof(IRedisListAsync))] + [TestCase(typeof(IRedisHash<,>), typeof(IRedisHashAsync<,>))] + [TestCase(typeof(IRedisSortedSet<>), typeof(IRedisSortedSetAsync<>))] + [TestCase(typeof(IRedisSet<>), typeof(IRedisSetAsync<>))] + [TestCase(typeof(IRedisList<>), typeof(IRedisListAsync<>))] + + [TestCase(typeof(IRedisTypedPipeline<>), typeof(IRedisTypedPipelineAsync<>))] + [TestCase(typeof(IRedisTypedQueueableOperation<>), typeof(IRedisTypedQueueableOperationAsync<>))] + [TestCase(typeof(IRedisTypedTransaction<>), typeof(IRedisTypedTransactionAsync<>))] + + public void TestSameAPI(Type syncInterface, Type asyncInterface) + { + TestContext.Out.WriteLine($"Comparing '{GetCSharpTypeName(syncInterface)}' and '{GetCSharpTypeName(asyncInterface)}'..."); + + var actual = new List(); + foreach (var method in asyncInterface.GetMethods(BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly)) + { + var tok = new MethodToken(method); + actual.Add(GetSignature(tok)); + } + + var expected = new List(); + ParameterToken cancellationParameter 
= new ParameterToken("token", typeof(CancellationToken), ParameterAttributes.Optional); + foreach (var method in syncInterface.GetMethods(BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly)) + { + AddExpected(method); + } + if (asyncInterface == typeof(IRedisSortedSetAsync) + || asyncInterface == typeof(IRedisSetAsync) + || asyncInterface == typeof(IRedisListAsync)) + { + AddFrom(typeof(ICollection), nameof(ICollection.Clear)); + AddFrom(typeof(ICollection), nameof(ICollection.Add)); + AddFrom(typeof(ICollection), nameof(ICollection.Remove)); + AddFrom(typeof(ICollection), nameof(ICollection.Contains)); + AddFrom(typeof(ICollection), "get_" + nameof(ICollection.Count), true); + + if (asyncInterface == typeof(IRedisListAsync)) + { + AddFrom(typeof(IList), nameof(IList.IndexOf)); + AddFrom(typeof(IList), nameof(IList.RemoveAt)); + AddFrom(typeof(IList), "set_Item", true); + AddFrom(typeof(IList), "get_Item", true); + } + } + else if (asyncInterface == typeof(IRedisSortedSetAsync<>) + || asyncInterface == typeof(IRedisSetAsync<>) + || asyncInterface == typeof(IRedisListAsync<>)) + { + AddFrom(typeof(ICollection<>), nameof(ICollection.Clear)); + AddFrom(typeof(ICollection<>), nameof(ICollection.Add)); + AddFrom(typeof(ICollection<>), nameof(ICollection.Remove)); + AddFrom(typeof(ICollection<>), nameof(ICollection.Contains)); + AddFrom(typeof(ICollection<>), "get_" + nameof(ICollection.Count), true); + + if (asyncInterface == typeof(IRedisListAsync<>)) + { + AddFrom(typeof(IList<>), nameof(IList.IndexOf)); + AddFrom(typeof(IList<>), nameof(IList.RemoveAt)); + AddFrom(typeof(IList<>), "set_Item", true); + AddFrom(typeof(IList<>), "get_Item", true); + } + } + else if (asyncInterface == typeof(IRedisHashAsync<,>)) + { + AddFrom(typeof(ICollection<>).MakeGenericType(typeof(KeyValuePair<,>).MakeGenericType(asyncInterface.GetGenericArguments())), nameof(IDictionary.Add)); + AddFrom(typeof(IDictionary<,>), nameof(IDictionary.Add)); + AddFrom(typeof(ICollection<>), nameof(IDictionary.Clear)); + AddFrom(typeof(IDictionary<,>), nameof(IDictionary.ContainsKey)); + AddFrom(typeof(IDictionary<,>), nameof(IDictionary.Remove)); + AddFrom(typeof(ICollection<>), "get_" + nameof(IDictionary.Count), true); + } + else if (asyncInterface == typeof(IRedisHashAsync)) + { + AddFrom(typeof(ICollection>), nameof(IDictionary.Add)); + AddFrom(typeof(IDictionary), nameof(IDictionary.Add)); + AddFrom(typeof(ICollection), nameof(IDictionary.Clear)); + AddFrom(typeof(IDictionary), nameof(IDictionary.ContainsKey)); + AddFrom(typeof(IDictionary), nameof(IDictionary.Remove)); + AddFrom(typeof(ICollection), "get_" + nameof(IDictionary.Count), true); + } + else if (asyncInterface == typeof(IRedisNativeClientAsync)) + { + AddFrom(typeof(RedisClient), nameof(RedisClient.SlowlogReset)); + AddFrom(typeof(RedisClient), nameof(RedisClient.BitCount)); + AddFromTyped(typeof(RedisClient), nameof(RedisClient.ZCount), typeof(string), typeof(double), typeof(double)); + // can't expose as SlowlogItem because of interface locations + expected.Add("ValueTask SlowlogGetAsync(int? top = default, CancellationToken token = default)"); + // adding missing "exists" capability + expected.Add("ValueTask SetAsync(string key, byte[] value, bool exists, long expirySeconds = 0, long expiryMilliseconds = 0, CancellationToken token = default)"); + } + else if (asyncInterface == typeof(IRedisClientAsync)) + { + expected.Add("ValueTask GetSlowlogAsync(int? 
numberOfRecords = default, CancellationToken token = default)"); + expected.Add("ValueTask SlowlogResetAsync(CancellationToken token = default)"); + } + else if (asyncInterface == typeof(ICacheClientAsync)) + { + AddFrom(typeof(ICacheClientExtended), nameof(ICacheClientExtended.GetKeysByPattern)); + AddFrom(typeof(ICacheClientExtended), nameof(ICacheClientExtended.GetTimeToLive)); + AddFrom(typeof(ICacheClientExtended), nameof(ICacheClientExtended.RemoveExpiredEntries)); + } + + void AddFrom(Type syncInterface, string name, bool fromPropertyToMethod = false) + => AddExpected(syncInterface.GetMethod(name), fromPropertyToMethod); + void AddFromTyped(Type syncInterface, string name, params Type[] types) + => AddExpected(syncInterface.GetMethod(name, types), false); + + Type AsyncType(Type result) + { + bool useTask = asyncInterface == typeof(ICacheClientAsync) + || asyncInterface == typeof(IRemoveByPatternAsync) + || asyncInterface == typeof(IEntityStoreAsync) + || asyncInterface == typeof(IEntityStoreAsync<>); + + if (result is null || result == typeof(void)) + return useTask ? typeof(Task) : typeof(ValueTask); + + return (useTask ? typeof(Task<>) : typeof(ValueTask<>)).MakeGenericType(result); + } + void AddExpected(MethodInfo method, bool fromPropertyToMethod = false) + { + if (method is null) return; + var tok = new MethodToken(method); + + ParameterToken[] parameters = tok.GetParameters(); + + // think about the return type + Type returnType; + if (tok.ReturnType == typeof(void)) + { + returnType = AsyncType(tok.ReturnType); + } + else if (tok.ReturnType == typeof(IDisposable)) + { + returnType = typeof(IAsyncDisposable); + } + else if (tok.ReturnType.IsGenericType && tok.ReturnType.GetGenericTypeDefinition() == typeof(IEnumerable<>)) + { + returnType = typeof(IAsyncEnumerable<>).MakeGenericType(tok.ReturnType.GetGenericArguments()); + } + else + { + returnType = AsyncType(SwapForAsyncIfNeedeed(tok.ReturnType)); + } + string name = tok.Name + "Async"; + bool addCancellation = true; + // sniff to see if this is a delegate hook + if (parameters.Length == 0 && typeof(Delegate).IsAssignableFrom(tok.ReturnType) && name.StartsWith("get_")) + { + // property getter; replace with event add + returnType = typeof(void); + name = "add_" + name.Substring(4); + parameters = new[] { new ParameterToken("value", ActionDelegateToFunc(tok.ReturnType), default) }; + + } + else if (parameters.Length == 1 && tok.ReturnType == typeof(void) && name.StartsWith("set_") + && typeof(Delegate).IsAssignableFrom(parameters[0].ParameterType)) + { + // property setter; replace with event remove + returnType = typeof(void); + name = "remove_" + name.Substring(4); + ref ParameterToken p = ref parameters[0]; + p = p.WithParameterType(ActionDelegateToFunc(p.ParameterType)); + } + + if (name.StartsWith("get_") || name.StartsWith("set_") || name.StartsWith("add_") || name.StartsWith("remove_")) + { + bool preserve = (name.StartsWith("get_") || name.StartsWith("set_")), fullyHandled = false; + if (asyncInterface == typeof(IRedisNativeClientAsync) || asyncInterface == typeof(IRedisClientAsync) + || asyncInterface == typeof(IRedisTypedClientAsync<>)) + { + switch (tok.Name) + { + case "get_" + nameof(IRedisNativeClient.DbSize): + case "get_" + nameof(IRedisNativeClient.LastSave): + case "get_" + nameof(IRedisNativeClient.Info): + fromPropertyToMethod = true; + preserve = false; + break; + case "set_" + nameof(IRedisNativeClient.Db): + name = nameof(IRedisNativeClientAsync.SelectAsync); + parameters[0] = 
parameters[0].WithName("db"); + fullyHandled = true; + break; + case "set_" + nameof(IRedisClientAsync.Hashes): + case "set_" + nameof(IRedisClientAsync.Lists): + case "set_" + nameof(IRedisClientAsync.Sets): + case "set_" + nameof(IRedisClientAsync.SortedSets): + return; // no "set" included + case "get_Item": + case "set_Item": + return; // no indexer + } + } + + if (fromPropertyToMethod) + { + name = name switch + { + "get_ItemAsync" => "ElementAtAsync", + "set_ItemAsync" => "SetValueAsync", + _ => name.Substring(4), // don't worry about the remove, that isn't in this catchment + }; + } + else if (preserve && !fullyHandled) + { // just keep it the same by default + name = tok.Name; + returnType = SwapForAsyncIfNeedeed(tok.ReturnType); + addCancellation = false; + } + + else if (fullyHandled) { } + else + { + addCancellation = false; + } + } + + static Type ActionDelegateToFunc(Type type) + { + if (type.IsGenericType) + { + var genDef = type.GetGenericTypeDefinition(); + var targs = type.GetGenericArguments(); + Array.Resize(ref targs, targs.Length + 1); + targs[targs.Length - 1] = typeof(ValueTask); + return Expression.GetFuncType(targs); + } + return type; + } + + if (asyncInterface == typeof(IRedisQueueCompletableOperationAsync) && parameters.Length == 1) + { + // very unusual case; Func => Func> + returnType = typeof(void); + ref ParameterToken p = ref parameters[0]; + if (p.ParameterType == typeof(Action)) + { + p = p.WithParameterType(typeof(Func)); + } + else + { + p = p.WithParameterType(typeof(Func<,>).MakeGenericType( + typeof(CancellationToken), typeof(ValueTask<>).MakeGenericType(p.ParameterType.GetGenericArguments()))); + } + tok = new MethodToken(name, returnType, parameters, tok.IsGenericMethod, tok.IsGenericMethodDefinition, tok.GetGenericArguments(), tok.AllAttributes()); + expected.Add(GetSignature(tok)); + } + else if (asyncInterface == typeof(IRedisQueueableOperationAsync) || asyncInterface == typeof(IRedisTypedQueueableOperationAsync<>)) + { + // very unusual case; Func => Func> + if (parameters.Length != 3) return; // move to optionals rather than overloads + ref ParameterToken p = ref parameters[0]; // fixup the delegate type + if (p.ParameterType.IsGenericType) + { + var genDef = p.ParameterType.GetGenericTypeDefinition(); + Type[] funcTypes = p.ParameterType.GetGenericArguments(); + funcTypes[0] = SwapForAsyncIfNeedeed(funcTypes[0]); + + if (genDef == typeof(Action<>)) + { + Array.Resize(ref funcTypes, funcTypes.Length + 1); + funcTypes[funcTypes.Length - 1] = typeof(ValueTask); + } + else + { + funcTypes[funcTypes.Length - 1] = typeof(ValueTask<>) + .MakeGenericType(funcTypes[funcTypes.Length - 1]); + } + + p = p.WithParameterType(typeof(Func<,>).MakeGenericType(funcTypes)); + } + + // make the other parameters optional + p = ref parameters[1]; + p = p.WithAttributes(p.Attributes | ParameterAttributes.Optional); + p = ref parameters[2]; + p = p.WithAttributes(p.Attributes | ParameterAttributes.Optional); + returnType = typeof(void); + name = method.Name; // retain the original name + + tok = new MethodToken(name, returnType, parameters, tok.IsGenericMethod, tok.IsGenericMethodDefinition, tok.GetGenericArguments(), tok.AllAttributes()); + expected.Add(GetSignature(tok)); + } + else + { + for (int i = 0; i < parameters.Length; i++) + { + ref ParameterToken p = ref parameters[i]; + Type type = p.ParameterType, swapped = SwapForAsyncIfNeedeed(type); + if (type != swapped) + { + p = p.WithParameterType(swapped); + } + } + + static bool IsParams(in MethodToken tok) + 
{ + var ps = tok.GetParameters(); + if (ps is null || ps.Length == 0) return false; + return ps.Last().IsDefined(typeof(ParamArrayAttribute)); + } + + if (IsParams(tok)) + { + // include it with params but without CancellationToken + tok = new MethodToken(name, returnType, parameters, tok.IsGenericMethod, tok.IsGenericMethodDefinition, tok.GetGenericArguments(), tok.AllAttributes()); + expected.Add(GetSignature(tok)); + + // and now remove the params so we can get with CancellationToken + ref ParameterToken p = ref parameters[parameters.Length - 1]; + p = p.WithAllAttributes(p.AllAttributes().Where(a => !(a is ParamArrayAttribute)).ToArray()); + } + + if (asyncInterface == typeof(IDistributedLockAsync) && name == nameof(IDistributedLockAsync.LockAsync)) + { + // can't use "out", so uses a new LockState type instead + returnType = typeof(ValueTask); + parameters = RemoveByRef(parameters); + + static ParameterToken[] RemoveByRef(ParameterToken[] parameters) + { + if (parameters.Any(x => x.ParameterType.IsByRef)) + { + parameters = parameters.Where(x => !x.ParameterType.IsByRef).ToArray(); + } + return parameters; + } + } + if (asyncInterface == typeof(IRedisNativeClientAsync)) + { + switch (tok.Name) + { + case nameof(IRedisNativeClient.DecrBy): + case nameof(IRedisNativeClient.IncrBy): + parameters[1] = parameters[1].WithParameterType(typeof(long)); + returnType = typeof(ValueTask); + break; + case nameof(IRedisNativeClient.Shutdown): + Insert(ref parameters, 0, new ParameterToken("noSave", typeof(bool), ParameterAttributes.Optional, false)); + break; + case nameof(IRedisNativeClient.Set): + Insert(ref parameters, 2, new ParameterToken("expirySeconds", typeof(long), ParameterAttributes.Optional, 0)); + Insert(ref parameters, 3, new ParameterToken("expiryMilliseconds", typeof(long), ParameterAttributes.Optional, 0)); + break; + } + + static void Insert(ref ParameterToken[] parameters, int index, ParameterToken value) + { + // don't try to be clever; this is inefficient but correct + var list = parameters.ToList(); + list.Insert(index, value); + parameters = list.ToArray(); + } + } + + if (asyncInterface == typeof(IRedisSubscriptionAsync) && tok.Name == "get_" + nameof(IRedisSubscription.SubscriptionCount)) + { + // this is a purely client value; don't treat as async + name = tok.Name; + returnType = tok.ReturnType; + } + + if (asyncInterface == typeof(IRedisClientAsync) || asyncInterface == typeof(IRedisTypedClientAsync<>)) + { + switch (tok.Name) + { + case nameof(IRedisClient.UrnKey): + case nameof(IRedisClient.As): + addCancellation = false; + name = tok.Name; + returnType = SwapForAsyncIfNeedeed(tok.ReturnType); + break; + case nameof(IRedisClient.Save): // to avoid AsyncAsync and overloaded meaning of Async + name = nameof(IRedisClientAsync.ForegroundSaveAsync); + break; + case nameof(IRedisClient.SaveAsync): // to avoid AsyncAsync and overloaded meaning of Async + name = nameof(IRedisClientAsync.BackgroundSaveAsync); + break; + case nameof(IRedisClient.RewriteAppendOnlyFileAsync): // for consistency + name = nameof(IRedisClientAsync.BackgroundRewriteAppendOnlyFileAsync); + break; + case nameof(IRedisClient.ExecCachedLua): + // Func scriptSha1 => Func> scriptSha1 + parameters[1] = parameters[1].WithParameterType(typeof(Func<,>).MakeGenericType(typeof(string), typeof(ValueTask<>).MakeGenericType(method.GetGenericArguments()))); + break; + case nameof(IRedisClient.AcquireLock) when asyncInterface == typeof(IRedisClientAsync): + if (parameters.Length != 2) return; // 2 overloads combined 
into 1 + parameters[1] = parameters[1].AsNullable().AsOptional(); + returnType = typeof(ValueTask<>).MakeGenericType(returnType); // add await for acquisition + break; + case nameof(IRedisClient.AcquireLock) when asyncInterface == typeof(IRedisTypedClientAsync<>): + if (parameters.Length != 1) return; // 2 overloads combined into 1 + parameters[0] = parameters[0].AsNullable().AsOptional(); + returnType = typeof(ValueTask<>).MakeGenericType(returnType); // add await for acquisition + break; + case nameof(IRedisClient.SetValueIfExists) when asyncInterface == typeof(IRedisClientAsync): + case nameof(IRedisClient.SetValueIfNotExists) when asyncInterface == typeof(IRedisClientAsync): + if (parameters.Length != 3) return; // 2 overloads combined into 1 + parameters[2] = parameters[2].AsNullable().AsOptional(); + break; + case nameof(IRedisClient.CreatePipeline): + case nameof(IRedisTypedClient.GetHash): + addCancellation = false; + name = tok.Name; + returnType = SwapForAsyncIfNeedeed(tok.ReturnType); + break; + } + } + + for (int i = 0; i < parameters.Length; i++) + { + ref ParameterToken p = ref parameters[i]; + var type = p.ParameterType; + if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(Dictionary<,>)) + { + // prefer IDictionary<,> to Dictionary<,> + p = p.WithParameterType(typeof(IDictionary<,>).MakeGenericType(type.GetGenericArguments())); + } + } + + // append optional CancellationToken + if (addCancellation) + { + Array.Resize(ref parameters, parameters.Length + 1); + parameters[parameters.Length - 1] = cancellationParameter; + } + tok = new MethodToken(name, returnType, parameters, tok.IsGenericMethod, tok.IsGenericMethodDefinition, tok.GetGenericArguments(), tok.AllAttributes()); + expected.Add(GetSignature(tok)); + } + } + + actual.Sort(); + expected.Sort(); + int extra = 0, missing = 0, match = 0; + Log($"actual: {actual.Count}, expected: {expected.Count}"); + foreach (var method in actual.Except(expected)) + { + Log($"+ {method}"); + extra++; + } + foreach (var method in expected.Except(actual)) + { + Log($"- {method}"); + missing++; + } + foreach (var method in expected.Intersect(actual)) + { + Log($"= {method}"); + match++; + } + Assert.True(extra == 0 && missing == 0, $"signature mismatch on {GetCSharpTypeName(asyncInterface)}; missing: {missing}, extra: {extra}, match: {match}"); + + + static Type SwapForAsyncIfNeedeed(Type type) + { + if (type.IsArray) + { + var t = type.GetElementType(); + var swapped = SwapForAsyncIfNeedeed(t); + if (t != swapped) + { + var rank = type.GetArrayRank(); + return swapped.MakeArrayType(rank); + } + return type; + } + if (type == typeof(IRedisClient)) return typeof(IRedisClientAsync); + if (type == typeof(ICacheClient)) return typeof(ICacheClientAsync); + if (type == typeof(IRedisPipeline)) return typeof(IRedisPipelineAsync); + if (type == typeof(IRedisPipelineShared)) return typeof(IRedisPipelineSharedAsync); + if (type == typeof(IDisposable)) return typeof(IAsyncDisposable); + if (type == typeof(IRedisList)) return typeof(IRedisListAsync); + if (type == typeof(IRedisSet)) return typeof(IRedisSetAsync); + if (type == typeof(IRedisSortedSet)) return typeof(IRedisSortedSetAsync); + if (type == typeof(IRedisHash)) return typeof(IRedisHashAsync); + if (type == typeof(IRedisSubscription)) return typeof(IRedisSubscriptionAsync); + if (type == typeof(IRedisTransaction)) return typeof(IRedisTransactionAsync); + + if (type.IsGenericType) + { + var genDef = type.GetGenericTypeDefinition(); + var targs = type.GetGenericArguments(); + 
for (int i = 0; i < targs.Length; i++) + targs[i] = SwapForAsyncIfNeedeed(targs[i]); + + if (genDef == typeof(IRedisTypedClient<>)) return typeof(IRedisTypedClientAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisList<>)) return typeof(IRedisListAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisSet<>)) return typeof(IRedisSetAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisSortedSet<>)) return typeof(IRedisSortedSetAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisTypedTransaction<>)) return typeof(IRedisTypedTransactionAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisHash<,>)) return typeof(IRedisHashAsync<,>).MakeGenericType(targs); + if (genDef == typeof(IRedisTypedPipeline<>)) return typeof(IRedisTypedPipelineAsync<>).MakeGenericType(targs); + + return genDef.MakeGenericType(targs); + } + + return type; + } + } + + static string GetCSharpTypeName(Type type) + { + if (!(type.IsGenericType || type.IsArray)) + { + return GetSimpleCSharpTypeName(type); + } + var sb = new StringBuilder(); + AppendCSharpTypeName(type, sb); + return sb.ToString(); + } + static string GetSimpleCSharpTypeName(Type type) + { + if (type == typeof(void)) return "void"; + if (type == typeof(bool)) return "bool"; + if (type == typeof(sbyte)) return "sbyte"; + if (type == typeof(short)) return "short"; + if (type == typeof(int)) return "int"; + if (type == typeof(long)) return "long"; + if (type == typeof(byte)) return "byte"; + if (type == typeof(ushort)) return "ushort"; + if (type == typeof(uint)) return "uint"; + if (type == typeof(ulong)) return "ulong"; + if (type == typeof(string)) return "string"; + if (type == typeof(double)) return "double"; + if (type == typeof(float)) return "float"; + if (type == typeof(object)) return "object"; + + return type.Name; + } + + static void AppendCSharpTypeName(Type type, StringBuilder sb) + { + if (type.IsArray) + { + // we won't worry about the difference between vector and non-vector rank zero arrays + AppendCSharpTypeName(type.GetElementType(), sb); + sb.Append("[").Append(',', type.GetArrayRank() - 1).Append("]"); + } + else if (type.IsGenericParameter) + { + sb.Append(type.Name); + } + else if (type.IsGenericType) + { + var nullable = Nullable.GetUnderlyingType(type); + if (nullable is object) + { + AppendCSharpTypeName(nullable, sb); + sb.Append("?"); + } + else + { + var name = type.Name; + int i = name.IndexOf('`'); + if (i < 0) + { + sb.Append(name); + } + else + { + sb.Append(name, 0, i); + } + sb.Append("<"); + var targs = type.GetGenericArguments(); + for (i = 0; i < targs.Length; i++) + { + if (i != 0) sb.Append(", "); + sb.Append(GetCSharpTypeName(targs[i])); + } + sb.Append(">"); + } + } + else + { + sb.Append(GetSimpleCSharpTypeName(type)); + } + } + static string GetSignature(MethodToken method) + { + var sb = new StringBuilder(); + AppendCSharpTypeName(method.ReturnType, sb); + sb.Append(' ').Append(method.Name); + if (method.IsGenericMethodDefinition) + { + sb.Append('<'); + var args = method.GetGenericArguments(); + for (int i = 0; i < args.Length; i++) + { + if (i != 0) sb.Append(", "); + sb.Append(args[i].Name); + } + sb.Append('>'); + } + sb.Append('('); + var ps = method.GetParameters(); + for (int i = 0; i < ps.Length; i++) + { + var p = ps[i]; + if (i != 0) sb.Append(", "); + if (p.IsDefined(typeof(ParamArrayAttribute))) + { + sb.Append("params "); + } + if (p.ParameterType.IsByRef) + { + const ParameterAttributes InOut = ParameterAttributes.In | ParameterAttributes.Out; + 
sb.Append((p.Attributes & InOut) switch + { + ParameterAttributes.In => "in", + ParameterAttributes.Out => "out", + _ => "ref" + }).Append(' '); + AppendCSharpTypeName(p.ParameterType.GetElementType(), sb); + } + else + { + AppendCSharpTypeName(p.ParameterType, sb); + } + sb.Append(' ').Append(p.Name); + if ((p.Attributes & ParameterAttributes.Optional) == ParameterAttributes.Optional) + { + sb.Append(" = "); + switch (p.DefaultValue) + { + case null: + case DBNull _: // used for delegates, honest! + sb.Append("default"); + break; + case string s: + sb.Append(@"""").Append(s.Replace(@"""", @"""""")).Append(@""""); + break; + case object o: + sb.Append(Convert.ToString(o, CultureInfo.InvariantCulture)); + break; + } + } + } + return sb.Append(')').ToString(); + } + + readonly struct ParameterToken + { + public bool IsDefined(Type attributeType) + => _allAttributes.Any(a => attributeType.IsAssignableFrom(a.GetType())); + public object DefaultValue { get; } + public ParameterAttributes Attributes { get; } + public string Name { get; } + public Type ParameterType { get; } + private readonly object[] _allAttributes; + public object[] AllAttributes() => MethodToken.Clone(_allAttributes); + + internal ParameterToken WithAllAttributes(params object[] allAttributes) + => new ParameterToken(Name, ParameterType, Attributes, DefaultValue, allAttributes); + + internal ParameterToken WithParameterType(Type parameterType) + => new ParameterToken(Name, parameterType, Attributes, DefaultValue, _allAttributes); + + internal ParameterToken AsNullable() + { + if (!ParameterType.IsValueType) return this; // already nullable (ish) + var existing = Nullable.GetUnderlyingType(ParameterType); + if (existing is object) return this; // already nullable + return WithParameterType(typeof(Nullable<>).MakeGenericType(ParameterType)); + } + + internal ParameterToken AsOptional() + => WithAttributes(Attributes | ParameterAttributes.Optional); + + internal ParameterToken WithAttributes(ParameterAttributes attributes) + => new ParameterToken(Name, ParameterType, attributes, DefaultValue, _allAttributes); + + internal ParameterToken WithName(string name) + => new ParameterToken(name, ParameterType, Attributes, DefaultValue, _allAttributes); + + public ParameterToken(ParameterInfo source) + { + Name = source.Name; + ParameterType = source.ParameterType; + Attributes = source.Attributes; + DefaultValue = source.DefaultValue; + _allAttributes = source.AllAttributes(); + } + + public ParameterToken(string name, Type parameterType, ParameterAttributes attributes, object defaultValue = default, params object[] allAttributes) + { + Name = name; + ParameterType = parameterType; + Attributes = attributes; + DefaultValue = defaultValue; + _allAttributes = allAttributes ?? 
Array.Empty<object>(); + } + } + + readonly struct MethodToken + { + private readonly ParameterToken[] _parameters; + private readonly Type[] _genericArguments; + private readonly object[] _allAttributes; + public bool IsDefined(Type attributeType) + => _allAttributes.Any(a => attributeType.IsAssignableFrom(a.GetType())); + internal static T[] Clone<T>(T[] source) + { + if (source is null) return null; + var result = new T[source.Length]; + source.CopyTo(result, 0); + return result; + } + public ParameterToken[] GetParameters() => Clone(_parameters); + public Type[] GetGenericArguments() => Clone(_genericArguments); + public string Name { get; } + public bool IsGenericMethodDefinition { get; } + public bool IsGenericMethod { get; } + public Type ReturnType { get; } + public object[] AllAttributes() => Clone(_allAttributes); + public MethodToken(MethodInfo source) + { + Name = source.Name; + IsGenericMethod = source.IsGenericMethod; + IsGenericMethodDefinition = source.IsGenericMethodDefinition; + ReturnType = source.ReturnType; + _genericArguments = (source.IsGenericMethod || source.IsGenericMethodDefinition) + ? source.GetGenericArguments() : null; + var ps = source.GetParameters(); + _parameters = ps is null ? null : Array.ConvertAll(ps, p => new ParameterToken(p)); + _allAttributes = source.AllAttributes(); + } + + public MethodToken(string name, Type returnType, ParameterToken[] parameters, + bool isGenericMethod, bool isGenericMethodDefinition, Type[] genericArguments, + params object[] allAttributes) + { + Name = name; + ReturnType = returnType; + IsGenericMethod = isGenericMethod; + IsGenericMethodDefinition = isGenericMethodDefinition; + _genericArguments = genericArguments; + _parameters = parameters ?? Array.Empty<ParameterToken>(); + _allAttributes = allAttributes ??
Array.Empty<object>(); + } + } + + [TestCase(typeof(ICacheClient), typeof(ICacheClientAsync))] + [TestCase(typeof(ICacheClientExtended), typeof(ICacheClientAsync), typeof(BasicRedisClientManager))] // duplicate not an error; APIs are coalesced + [TestCase(typeof(IEntityStore), typeof(IEntityStoreAsync))] + [TestCase(typeof(IEntityStore<>), typeof(IEntityStoreAsync<>))] + [TestCase(typeof(IRedisClient), typeof(IRedisClientAsync))] + + [TestCase(typeof(IRedisClientsManager), typeof(IRedisClientsManagerAsync))] + [TestCase(typeof(IRedisNativeClient), typeof(IRedisNativeClientAsync))] + [TestCase(typeof(IRedisPipeline), typeof(IRedisPipelineAsync))] + [TestCase(typeof(IRedisPipelineShared), typeof(IRedisPipelineSharedAsync))] + [TestCase(typeof(IRedisQueueableOperation), typeof(IRedisQueueableOperationAsync))] + + [TestCase(typeof(IRedisQueueCompletableOperation), typeof(IRedisQueueCompletableOperationAsync))] + [TestCase(typeof(IRedisTransaction), typeof(IRedisTransactionAsync))] + [TestCase(typeof(IRedisTransactionBase), typeof(IRedisTransactionBaseAsync))] + [TestCase(typeof(IRedisTypedClient<>), typeof(IRedisTypedClientAsync<>))] + [TestCase(typeof(IRemoveByPattern), typeof(IRemoveByPatternAsync))] + + [TestCase(typeof(IDistributedLock), typeof(IDistributedLockAsync))] + [TestCase(typeof(IRedisSubscription), typeof(IRedisSubscriptionAsync))] + [TestCase(typeof(IRedisHash), typeof(IRedisHashAsync))] + [TestCase(typeof(IRedisSortedSet), typeof(IRedisSortedSetAsync))] + [TestCase(typeof(IRedisSet), typeof(IRedisSetAsync))] + + [TestCase(typeof(IRedisList), typeof(IRedisListAsync))] + [TestCase(typeof(IRedisHash<,>), typeof(IRedisHashAsync<,>))] + [TestCase(typeof(IRedisSortedSet<>), typeof(IRedisSortedSetAsync<>))] + [TestCase(typeof(IRedisSet<>), typeof(IRedisSetAsync<>))] + [TestCase(typeof(IRedisList<>), typeof(IRedisListAsync<>))] + + [TestCase(typeof(IRedisTypedPipeline<>), typeof(IRedisTypedPipelineAsync<>))] + [TestCase(typeof(IRedisTypedQueueableOperation<>), typeof(IRedisTypedQueueableOperationAsync<>))] + [TestCase(typeof(IRedisTypedTransaction<>), typeof(IRedisTypedTransactionAsync<>))] + public void TestFullyImplemented(Type syncInterface, Type asyncInterface, params Type[] ignore) + { + HashSet<Type> except = new HashSet<Type>(ignore ?? Type.EmptyTypes); +#if NET472 // only exists there!
+ if (syncInterface == typeof(IRedisClientsManager)) + { + except.Add(typeof(ServiceStack.Redis.Support.Diagnostic.TrackingRedisClientsManager)); + } +#endif + + var syncTypes = AllTypes.Except(except).Where(x => Implements(x, syncInterface)).ToArray(); + DumpTypes(syncInterface, syncTypes); + + var asyncTypes = AllTypes.Except(except).Where(x => Implements(x, asyncInterface)).ToArray(); + DumpTypes(asyncInterface, asyncTypes); + Assert.AreEqual(syncTypes, asyncTypes); + } + + static void DumpTypes(Type @interface, Type[] classes) + { + TestContext.Out.WriteLine($"Classes that implement {@interface.Name}: {classes.Length}:"); + foreach (var @class in classes) + { + TestContext.Out.WriteLine($" {@class.FullName}"); + } + TestContext.Out.WriteLine(); + } + + static bool Implements(Type @class, Type @interface) + { + if (@interface.IsGenericTypeDefinition) + { + var found = (from iType in @class.GetInterfaces() + where iType.IsGenericType + && iType.GetGenericTypeDefinition() == @interface + select iType).SingleOrDefault(); + return found != null && found.IsAssignableFrom(@class); + } + return @interface.IsAssignableFrom(@class); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/BasicRediscClientManagerTests.Async.cs b/tests/ServiceStack.Redis.Tests/BasicRediscClientManagerTests.Async.cs new file mode 100644 index 00000000..70685fd4 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/BasicRediscClientManagerTests.Async.cs @@ -0,0 +1,43 @@ +using NUnit.Framework; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + public class BasicRediscClientManagerTestsAsync + : RedisClientTestsBaseAsync + { + [Test] + public async Task Can_select_db() + { + var redisManager = new BasicRedisClientManager("127.0.0.1"); + + await using (var client = await redisManager.GetClientAsync()) + { + await client.SelectAsync(2); + await client.SetAsync("db", 2); + } + + await using (var client = await redisManager.GetClientAsync()) + { + await client.SelectAsync(3); + await client.SetAsync("db", 3); + } + + await using (var client = await redisManager.GetClientAsync()) + { + await client.SelectAsync(2); + //((RedisClient)client).ChangeDb(2); + var db = await client.GetAsync("db"); + Assert.That(db, Is.EqualTo(2)); + } + + redisManager = new BasicRedisClientManager("127.0.0.1?db=3"); + await using (var client = await redisManager.GetClientAsync()) + { + var db = await client.GetAsync("db"); + Assert.That(db, Is.EqualTo(3)); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/BasicRediscClientManagerTests.cs b/tests/ServiceStack.Redis.Tests/BasicRediscClientManagerTests.cs new file mode 100644 index 00000000..893afcaf --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/BasicRediscClientManagerTests.cs @@ -0,0 +1,41 @@ +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + public class BasicRediscClientManagerTests + : RedisClientTestsBase + { + [Test] + public void Can_select_db() + { + var redisManager = new BasicRedisClientManager("127.0.0.1"); + + using (var client = redisManager.GetClient()) + { + client.Db = 2; + client.Set("db", 2); + } + + using (var client = redisManager.GetClient()) + { + client.Db = 3; + client.Set("db", 3); + } + + using (var client = redisManager.GetClient()) + { + client.Db = 2; + //((RedisClient)client).ChangeDb(2); + var db = client.Get("db"); + Assert.That(db, Is.EqualTo(2)); + } + + redisManager = new BasicRedisClientManager("127.0.0.1?db=3"); + using (var client = 
redisManager.GetClient()) + { + var db = client.Get("db"); + Assert.That(db, Is.EqualTo(3)); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Benchmarks/DoubleSerializationBenchmarks.cs b/tests/ServiceStack.Redis.Tests/Benchmarks/DoubleSerializationBenchmarks.cs index fe4b140f..aa4598b1 100644 --- a/tests/ServiceStack.Redis.Tests/Benchmarks/DoubleSerializationBenchmarks.cs +++ b/tests/ServiceStack.Redis.Tests/Benchmarks/DoubleSerializationBenchmarks.cs @@ -8,92 +8,92 @@ namespace ServiceStack.Redis.Tests.Benchmarks { - [TestFixture, Explicit] - public class DoubleSerializationBenchmarks - { - const int times = 100000; - - public void ResetGC() - { - GC.Collect(); - GC.WaitForPendingFinalizers(); - GC.Collect(); - } - - [Test] - public void Compare_double_serializers() - { - var initalVal = 0.3333333333333333d; - - var results = new string[times]; - - ResetGC(); - var sw = Stopwatch.StartNew(); - - for (var i = 0; i < times; i++) - { - results[i] = (initalVal + i).ToString(); - } - - Debug.WriteLine("double.ToString(): Completed in ms: " + sw.ElapsedMilliseconds); - //PrintLastValues(results, 100); - - ResetGC(); - sw = Stopwatch.StartNew(); - - for (var i = 0; i < times; i++) - { - results[i] = (initalVal + i).ToString("r"); - } - - Debug.WriteLine("double.ToString('r') completed in ms: " + sw.ElapsedMilliseconds); - //PrintLastValues(results, 100); - - //Default - ResetGC(); - sw = Stopwatch.StartNew(); - - for (var i = 0; i < times; i++) - { - results[i] = DoubleConverter.ToExactString(initalVal + i); - } - - Debug.WriteLine("DoubleConverter.ToExactString(): Completed in ms: " + sw.ElapsedMilliseconds); - //PrintLastValues(results, 100); - - //What #XBOX uses - ResetGC(); - sw = Stopwatch.StartNew(); - - for (var i = 0; i < times; i++) - { - results[i] = BitConverter.ToString(BitConverter.GetBytes(initalVal + i)); - } - - Debug.WriteLine("BitConverter.ToString() completed in ms: " + sw.ElapsedMilliseconds); - //PrintLastValues(results, 100); - - - //What Booksleeve uses - ResetGC(); - sw = Stopwatch.StartNew(); - - for (var i = 0; i < times; i++) - { - results[i] = (initalVal + i).ToString("G", CultureInfo.InvariantCulture); - } - - Debug.WriteLine("double.ToString('G') completed in ms: " + sw.ElapsedMilliseconds); - //PrintLastValues(results, 100); - } - - private static void PrintLastValues(string[] results, int count) - { - var sb = new StringBuilder(); - for (int i = times - 1; i >= (times - count); i--) - sb.AppendLine(results[i]); - Debug.WriteLine("Last {0} values: ".Fmt(count)); - Debug.WriteLine(sb); - } - } + [TestFixture, Ignore("Benchmark")] + public class DoubleSerializationBenchmarks + { + const int times = 100000; + + public void ResetGC() + { + GC.Collect(); + GC.WaitForPendingFinalizers(); + GC.Collect(); + } + + [Test] + public void Compare_double_serializers() + { + var initalVal = 0.3333333333333333d; + + var results = new string[times]; + + ResetGC(); + var sw = Stopwatch.StartNew(); + + for (var i = 0; i < times; i++) + { + results[i] = (initalVal + i).ToString(); + } + + Debug.WriteLine("double.ToString(): Completed in ms: " + sw.ElapsedMilliseconds); + //PrintLastValues(results, 100); + + ResetGC(); + sw = Stopwatch.StartNew(); + + for (var i = 0; i < times; i++) + { + results[i] = (initalVal + i).ToString("r"); + } + + Debug.WriteLine("double.ToString('r') completed in ms: " + sw.ElapsedMilliseconds); + //PrintLastValues(results, 100); + + //Default + ResetGC(); + sw = Stopwatch.StartNew(); + + for (var i = 0; i < times; i++) 
+ { + results[i] = DoubleConverter.ToExactString(initalVal + i); + } + + Debug.WriteLine("DoubleConverter.ToExactString(): Completed in ms: " + sw.ElapsedMilliseconds); + //PrintLastValues(results, 100); + + //What #XBOX uses + ResetGC(); + sw = Stopwatch.StartNew(); + + for (var i = 0; i < times; i++) + { + results[i] = BitConverter.ToString(BitConverter.GetBytes(initalVal + i)); + } + + Debug.WriteLine("BitConverter.ToString() completed in ms: " + sw.ElapsedMilliseconds); + //PrintLastValues(results, 100); + + + //What Booksleeve uses + ResetGC(); + sw = Stopwatch.StartNew(); + + for (var i = 0; i < times; i++) + { + results[i] = (initalVal + i).ToString("G", CultureInfo.InvariantCulture); + } + + Debug.WriteLine("double.ToString('G') completed in ms: " + sw.ElapsedMilliseconds); + //PrintLastValues(results, 100); + } + + private static void PrintLastValues(string[] results, int count) + { + var sb = new StringBuilder(); + for (int i = times - 1; i >= (times - count); i--) + sb.AppendLine(results[i]); + Debug.WriteLine("Last {0} values: ".Fmt(count)); + Debug.WriteLine(sb); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Benchmarks/RedisMqHostBenchmarks.cs b/tests/ServiceStack.Redis.Tests/Benchmarks/RedisMqHostBenchmarks.cs deleted file mode 100644 index 949a7d8c..00000000 --- a/tests/ServiceStack.Redis.Tests/Benchmarks/RedisMqHostBenchmarks.cs +++ /dev/null @@ -1,89 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading; -using NUnit.Framework; -using ServiceStack.Redis.Messaging; - -namespace ServiceStack.Redis.Tests.Benchmarks -{ - [TestFixture, Category("Integration"), Explicit] - public class RedisMqHostBenchmarks - { - public class Incr - { - public int Value { get; set; } - } - - public class IncrBlocking - { - public int Value { get; set; } - } - - private static RedisMqHost CreateMqHost() - { - return CreateMqHost(2); - } - - private static RedisMqHost CreateMqHost(int noOfRetries) - { - var redisFactory = new BasicRedisClientManager(); - try - { - redisFactory.Exec(redis => redis.FlushAll()); - } - catch (RedisException rex) - { - Debug.WriteLine("WARNING: Redis not started? 
\n" + rex.Message); - } - var mqHost = new RedisMqHost(redisFactory, noOfRetries, null); - return mqHost; - } - - [Test] - public void Can_receive_and_process_same_reply_responses() - { - var mqHost = CreateMqHost(); - var called = 0; - - mqHost.RegisterHandler(m => - { - called++; - return new Incr { Value = m.GetBody().Value + 1 }; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new Incr { Value = 1 }); - - Thread.Sleep(10000); - - Debug.WriteLine("Times called: " + called); - } - - [Test] - public void Can_receive_and_process_same_reply_responses_blocking() - { - var mqHost = CreateMqHost(); - var called = 0; - - mqHost.RegisterHandler(m => - { - called++; - mqHost.CreateMessageQueueClient().Publish(new IncrBlocking { Value = m.GetBody().Value + 1 }); - Thread.Sleep(100); - return null; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new IncrBlocking { Value = 1 }); - - Thread.Sleep(10000); - - Debug.WriteLine("Times called: " + called); - } - - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Benchmarks/RedisMqHostPoolBenchmarks.cs b/tests/ServiceStack.Redis.Tests/Benchmarks/RedisMqHostPoolBenchmarks.cs deleted file mode 100644 index 231143d5..00000000 --- a/tests/ServiceStack.Redis.Tests/Benchmarks/RedisMqHostPoolBenchmarks.cs +++ /dev/null @@ -1,192 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading; -using NUnit.Framework; -using ServiceStack.Redis.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Tests.Benchmarks -{ - [TestFixture, Category("Integration"), Explicit] - public class RedisMqHostPoolBenchmarks - { - public class Incr - { - public int Value { get; set; } - } - - public class IncrBlocking - { - public int Value { get; set; } - } - - private static RedisMqHostPool CreateMqHostPool(int threadCount = 1) - { - var redisFactory = new BasicRedisClientManager(); - try - { - redisFactory.Exec(redis => redis.FlushAll()); - } - catch (RedisException rex) - { - Debug.WriteLine("WARNING: Redis not started? 
\n" + rex.Message); - } - var mqHost = new RedisMqHostPool(redisFactory) - { - NoOfThreadsPerService = threadCount, - }; - return mqHost; - } - - [Test] - public void Can_receive_and_process_same_reply_responses() - { - var mqHost = CreateMqHostPool(threadCount: 3); - var called = 0; - - mqHost.RegisterHandler(m => - { - called++; - return new Incr { Value = m.GetBody().Value + 1 }; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new Incr { Value = 1 }); - - Thread.Sleep(10000); - - Debug.WriteLine("Times called: " + called); - } - - [Test] - public void Can_receive_and_process_same_reply_responses_blocking() - { - var mqHost = CreateMqHostPool(threadCount: 5); - var called = 0; - - mqHost.RegisterHandler(m => - { - called++; - mqHost.CreateMessageQueueClient().Publish(new IncrBlocking { Value = m.GetBody().Value + 1 }); - Thread.Sleep(100); - return null; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new IncrBlocking { Value = 1 }); - - Thread.Sleep(10000); - - Debug.WriteLine("Times called: " + called); - } - - [Test] - public void Can_receive_and_process_same_reply_responses_blocking_and_non_blocking() - { - var mqHost = CreateMqHostPool(); - var nonBlocking = 0; - var blocking = 0; - - mqHost.RegisterHandler(m => - { - nonBlocking++; - return new Incr { Value = m.GetBody().Value + 1 }; - }, 1); //Non-blocking less no of threads the better - - mqHost.RegisterHandler(m => - { - blocking++; - mqHost.CreateMessageQueueClient().Publish(new IncrBlocking { Value = m.GetBody().Value + 1 }); - Thread.Sleep(100); - return null; - }, 5); //Blocking, more threads == better - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new Incr { Value = 1 }); - mqClient.Publish(new IncrBlocking { Value = 1 }); - - Thread.Sleep(10000); - - Debug.WriteLine("Times called: non-blocking: {0}, blocking: {1}".Fmt(nonBlocking, blocking)); - } - - [Test] - public void Test_Blocking_messages_throughput() - { - var mqHost = CreateMqHostPool(); - var blocking = 0; - const int BlockFor = 100; - const int NoOfThreads = 5; - const int SendEvery = BlockFor / NoOfThreads / 4; - - mqHost.RegisterHandler(m => - { - blocking++; - Thread.Sleep(BlockFor); - return null; - }, NoOfThreads); - - mqHost.Start(); - - var startedAt = DateTime.Now; - var mqClient = mqHost.CreateMessageQueueClient(); - while (DateTime.Now - startedAt < TimeSpan.FromSeconds(10)) - { - mqClient.Publish(new IncrBlocking { Value = 1 }); - Thread.Sleep(SendEvery); - } - - Debug.WriteLine("Times called: blocking: {0}".Fmt(blocking)); - } - - [Test] - public void Test_Blocking_and_NonBlocking_messages_throughput() - { - var mqHost = CreateMqHostPool(); - var nonBlocking = 0; - var blocking = 0; - const int BlockFor = 100; - const int NoOfThreads = 5; - const int SendBlockingMsgEvery = BlockFor / NoOfThreads / 4; - - mqHost.RegisterHandler(m => - { - nonBlocking++; - return null; - }, 3); - - mqHost.RegisterHandler(m => - { - blocking++; - Thread.Sleep(BlockFor); - return null; - }, NoOfThreads); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - - var stopWatch = Stopwatch.StartNew(); - long lastBlockingSentAtMs = 0; - - while (stopWatch.ElapsedMilliseconds < 10 * 1000) - { - mqClient.Publish(new Incr { Value = 1 }); - while (stopWatch.ElapsedMilliseconds - lastBlockingSentAtMs > SendBlockingMsgEvery) - { - mqClient.Publish(new IncrBlocking { Value = 1 }); - lastBlockingSentAtMs = 
stopWatch.ElapsedMilliseconds; - } - } - - Debug.WriteLine("Times called: non-blocking: {0}, blocking: {1}".Fmt(nonBlocking, blocking)); - } - - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/CacheClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/CacheClientTests.Async.cs new file mode 100644 index 00000000..e63437a0 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/CacheClientTests.Async.cs @@ -0,0 +1,21 @@ +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + public class CacheClientTestsAsync + { + IRedisClientsManagerAsync redisManager = new RedisManagerPool(TestConfig.SingleHost); + + [Test] + public async Task Can_get_set_CacheClient_Async() + { + await using var cache = await redisManager.GetCacheClientAsync(); + await cache.FlushAllAsync(); + + await cache.SetAsync("key", "A"); + var result = await cache.GetAsync("key"); + Assert.That(result, Is.EqualTo("A")); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/CacheClientTests.cs b/tests/ServiceStack.Redis.Tests/CacheClientTests.cs new file mode 100644 index 00000000..4d6f5b3d --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/CacheClientTests.cs @@ -0,0 +1,22 @@ +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + public class CacheClientTests + : RedisClientTestsBase + { + IRedisClientsManager redisManager = new RedisManagerPool(TestConfig.SingleHost); + + [Test] + public void Can_get_set_CacheClient() + { + var cache = redisManager.GetCacheClient(); + cache.FlushAll(); + + cache.Set("key", "A"); + var result = cache.Get("key"); + Assert.That(result, Is.EqualTo("A")); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ConfigTests.cs b/tests/ServiceStack.Redis.Tests/ConfigTests.cs new file mode 100644 index 00000000..cc84948a --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/ConfigTests.cs @@ -0,0 +1,164 @@ +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class ConfigTests + { + [OneTimeSetUp] + public void OneTimeSetUp() + { + RedisConfig.VerifyMasterConnections = false; + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { + RedisConfig.VerifyMasterConnections = true; + } + + [Test] + [TestCase("host", "{Host:host,Port:6379}")] + [TestCase("redis://host", "{Host:host,Port:6379}")] + [TestCase("host:1", "{Host:host,Port:1}")] + [TestCase("pass@host:1", "{Host:host,Port:1,Password:pass}")] + [TestCase("nunit:pass@host:1", "{Host:host,Port:1,Client:nunit,Password:pass}")] + [TestCase("host:1?password=pass&client=nunit", "{Host:host,Port:1,Client:nunit,Password:pass}")] + [TestCase("host:1?db=2", "{Host:host,Port:1,Db:2}")] + [TestCase("host?ssl=true", "{Host:host,Port:6380,Ssl:True}")] + [TestCase("host:6380?ssl=true&password=pass&sslprotocols=Tls12", "{Host:host,Port:6380,Ssl:True,Password:pass,SslProtocols:Tls12}")] + [TestCase("host:1?ssl=true", "{Host:host,Port:1,Ssl:True}")] + [TestCase("host:1?connectTimeout=1&sendtimeout=2&receiveTimeout=3&idletimeoutsecs=4", + "{Host:host,Port:1,ConnectTimeout:1,SendTimeout:2,ReceiveTimeout:3,IdleTimeOutSecs:4}")] + [TestCase("redis://nunit:pass@host:1?ssl=true&db=1&connectTimeout=2&sendtimeout=3&receiveTimeout=4&retryTimeout=5&idletimeoutsecs=5&NamespacePrefix=prefix.", + 
"{Host:host,Port:1,Ssl:True,Client:nunit,Password:pass,Db:1,ConnectTimeout:2,SendTimeout:3,ReceiveTimeout:4,RetryTimeout:5,IdleTimeOutSecs:5,NamespacePrefix:prefix.}")] + [TestCase("redis://nunit:pass@host:1?ssl=true&sslprotocols=Tls12&db=1&connectTimeout=2&sendtimeout=3&receiveTimeout=4&retryTimeout=5&idletimeoutsecs=5&NamespacePrefix=prefix.", + "{Host:host,Port:1,Ssl:True,Client:nunit,Password:pass,SslProtocols:Tls12,Db:1,ConnectTimeout:2,SendTimeout:3,ReceiveTimeout:4,RetryTimeout:5,IdleTimeOutSecs:5,NamespacePrefix:prefix.}")] + public void Does_handle_different_connection_strings_settings(string connString, string expectedJsv) + { + var actual = connString.ToRedisEndpoint(); + var expected = expectedJsv.FromJsv(); + + Assert.That(actual, Is.EqualTo(expected), + "{0} != {1}".Fmt(actual.ToJsv(), expected.ToJsv())); + } + + [Test] + [TestCase("host", "host:6379")] + [TestCase("redis://host", "host:6379")] + [TestCase("host:1", "host:1")] + [TestCase("pass@host:1", "host:1?Password=pass")] + [TestCase("nunit:pass@host:1", "host:1?Client=nunit&Password=pass")] + [TestCase("host:1?password=pass&client=nunit", "host:1?Client=nunit&Password=pass")] + [TestCase("host:1?db=2", "host:1?Db=2")] + [TestCase("host?ssl=true", "host:6380?Ssl=true")] + [TestCase("host:1?ssl=true", "host:1?Ssl=true")] + [TestCase("host:1?connectTimeout=1&sendtimeout=2&receiveTimeout=3&idletimeoutsecs=4", + "host:1?ConnectTimeout=1&SendTimeout=2&ReceiveTimeout=3&IdleTimeOutSecs=4")] + [TestCase("redis://nunit:pass@host:1?ssl=true&db=1&connectTimeout=2&sendtimeout=3&receiveTimeout=4&idletimeoutsecs=5&NamespacePrefix=prefix.", + "host:1?Client=nunit&Password=pass&Db=1&Ssl=true&ConnectTimeout=2&SendTimeout=3&ReceiveTimeout=4&IdleTimeOutSecs=5&NamespacePrefix=prefix.")] + [TestCase("password@host:6380?ssl=true&sslprotocols=Tls12", "host:6380?Password=password&Ssl=true&SslProtocols=Tls12")] + public void Does_Serialize_RedisEndpoint(string connString, string expectedString) + { + var actual = connString.ToRedisEndpoint(); + Assert.That(actual.ToString(), Is.EqualTo(expectedString)); + } + + [Test] + public void Does_set_all_properties_on_Client_using_ClientsManagers() + { + var connStr = "redis://nunit:pass@host:1?ssl=true&sslprotocols=Tls12&db=0&connectTimeout=2&sendtimeout=3&receiveTimeout=4&idletimeoutsecs=5&NamespacePrefix=prefix."; + var expected = "{Host:host,Port:1,Ssl:True,SslProtocols:Tls12,Client:nunit,Password:pass,Db:0,ConnectTimeout:2,SendTimeout:3,ReceiveTimeout:4,IdleTimeOutSecs:5,NamespacePrefix:prefix.}" + .FromJsv(); + + using (var pooledManager = new RedisManagerPool(connStr)) + { + AssertClientManager(pooledManager, expected); + } + using (var pooledManager = new PooledRedisClientManager(connStr)) + { + AssertClientManager(pooledManager, expected); + } + using (var basicManager = new BasicRedisClientManager(connStr)) + { + AssertClientManager(basicManager, expected); + } + } + + [Test] + public void Does_encode_values_when_serializing_to_ConnectionString() + { + var config = new RedisEndpoint + { + Host = "host", + Port = 1, + Password = "p@55W0rd=" + }; + + var connString = config.ToString(); + Assert.That(connString, Is.EqualTo("host:1?Password=p%4055W0rd%3d")); + + var fromConfig = connString.ToRedisEndpoint(); + Assert.That(fromConfig.Host, Is.EqualTo(config.Host)); + Assert.That(fromConfig.Port, Is.EqualTo(config.Port)); + Assert.That(fromConfig.Password, Is.EqualTo(config.Password)); + } + + private static void AssertClientManager(IRedisClientsManager redisManager, RedisEndpoint expected) + { + 
using (var readWrite = (RedisClient)redisManager.GetClient()) + using (var readOnly = (RedisClient)redisManager.GetReadOnlyClient()) + using (var cacheClientWrapper = (RedisClientManagerCacheClient)redisManager.GetCacheClient()) + { + AssertClient(readWrite, expected); + AssertClient(readOnly, expected); + + using (var cacheClient = (RedisClient)cacheClientWrapper.GetClient()) + { + AssertClient(cacheClient, expected); + } + } + } + + private static void AssertClient(RedisClient redis, RedisEndpoint expected) + { + Assert.That(redis.Host, Is.EqualTo(expected.Host)); + Assert.That(redis.Port, Is.EqualTo(expected.Port)); + Assert.That(redis.Ssl, Is.EqualTo(expected.Ssl)); + Assert.That(redis.SslProtocols, Is.EqualTo(expected.SslProtocols)); + Assert.That(redis.Client, Is.EqualTo(expected.Client)); + Assert.That(redis.Password, Is.EqualTo(expected.Password)); + Assert.That(redis.Db, Is.EqualTo(expected.Db)); + Assert.That(redis.ConnectTimeout, Is.EqualTo(expected.ConnectTimeout)); + Assert.That(redis.SendTimeout, Is.EqualTo(expected.SendTimeout)); + Assert.That(redis.ReceiveTimeout, Is.EqualTo(expected.ReceiveTimeout)); + Assert.That(redis.RetryTimeout, Is.EqualTo(expected.RetryTimeout)); + Assert.That(redis.IdleTimeOutSecs, Is.EqualTo(expected.IdleTimeOutSecs)); + Assert.That(redis.NamespacePrefix, Is.EqualTo(expected.NamespacePrefix)); + } + + [Test] + public void Does_set_Client_name_on_Connection() + { + using (var redis = new RedisClient(TestConfig.SingleHost + "?Client=nunit")) + { + var clientName = redis.GetClient(); + + Assert.That(clientName, Is.EqualTo("nunit")); + } + } + + [Test] + public void Does_set_Client_on_Pooled_Connection() + { + using (var redisManager = new PooledRedisClientManager(TestConfig.SingleHost + "?Client=nunit")) + using (var redis = redisManager.GetClient()) + { + var clientName = redis.GetClient(); + + Assert.That(clientName, Is.EqualTo("nunit")); + } + } + } +} diff --git a/tests/ServiceStack.Redis.Tests/CultureInfoTests.Async.cs b/tests/ServiceStack.Redis.Tests/CultureInfoTests.Async.cs new file mode 100644 index 00000000..12cc63f6 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/CultureInfoTests.Async.cs @@ -0,0 +1,47 @@ +using NUnit.Framework; +using System.Globalization; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class CultureInfoTestsAsync + : RedisClientTestsBaseAsync + { + private CultureInfo previousCulture = CultureInfo.InvariantCulture; + + [OneTimeSetUp] + public void OneTimeSetUp() + { +#if NETCORE + previousCulture = CultureInfo.CurrentCulture; + CultureInfo.CurrentCulture = new CultureInfo("fr-FR"); +#else + previousCulture = Thread.CurrentThread.CurrentCulture; + Thread.CurrentThread.CurrentCulture = new CultureInfo("fr-FR"); + Thread.CurrentThread.CurrentUICulture = new CultureInfo("fr-FR"); +#endif + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { +#if NETCORE + CultureInfo.CurrentCulture = previousCulture; +#else + Thread.CurrentThread.CurrentCulture = previousCulture; +#endif + } + + [Test] + public async Task Can_AddItemToSortedSet_in_different_Culture() + { + await RedisAsync.AddItemToSortedSetAsync("somekey1", "somevalue", 66121.202); + var score = await RedisAsync.GetItemScoreInSortedSetAsync("somekey1", "somevalue"); + + Assert.That(score, Is.EqualTo(66121.202)); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/CultureInfoTests.cs b/tests/ServiceStack.Redis.Tests/CultureInfoTests.cs index 5a8ea3ad..897391ff 
100644 --- a/tests/ServiceStack.Redis.Tests/CultureInfoTests.cs +++ b/tests/ServiceStack.Redis.Tests/CultureInfoTests.cs @@ -4,34 +4,43 @@ namespace ServiceStack.Redis.Tests { - [TestFixture] - public class CultureInfoTests - : RedisClientTestsBase - { - private CultureInfo previousCulture = CultureInfo.InvariantCulture; + [TestFixture] + public class CultureInfoTests + : RedisClientTestsBase + { + private CultureInfo previousCulture = CultureInfo.InvariantCulture; - [TestFixtureSetUp] - public void TestFixtureSetUp() - { - previousCulture = Thread.CurrentThread.CurrentCulture; - Thread.CurrentThread.CurrentCulture = new CultureInfo("fr-FR"); - Thread.CurrentThread.CurrentUICulture = new CultureInfo("fr-FR"); - } + [OneTimeSetUp] + public void OneTimeSetUp() + { +#if NETCORE + previousCulture = CultureInfo.CurrentCulture; + CultureInfo.CurrentCulture = new CultureInfo("fr-FR"); +#else + previousCulture = Thread.CurrentThread.CurrentCulture; + Thread.CurrentThread.CurrentCulture = new CultureInfo("fr-FR"); + Thread.CurrentThread.CurrentUICulture = new CultureInfo("fr-FR"); +#endif + } - [TestFixtureTearDown] - public void TestFixtureTearDown() - { - Thread.CurrentThread.CurrentCulture = previousCulture; - } + [OneTimeTearDown] + public void OneTimeTearDown() + { +#if NETCORE + CultureInfo.CurrentCulture = previousCulture; +#else + Thread.CurrentThread.CurrentCulture = previousCulture; +#endif + } - [Test] - public void Can_AddItemToSortedSet_in_different_Culture() - { - Redis.AddItemToSortedSet("somekey1", "somevalue", 66121.202); - var score = Redis.GetItemScoreInSortedSet("somekey1", "somevalue"); + [Test] + public void Can_AddItemToSortedSet_in_different_Culture() + { + Redis.AddItemToSortedSet("somekey1", "somevalue", 66121.202); + var score = Redis.GetItemScoreInSortedSet("somekey1", "somevalue"); - Assert.That(score, Is.EqualTo(66121.202)); - } + Assert.That(score, Is.EqualTo(66121.202)); + } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/CustomCommandTests.Async.cs b/tests/ServiceStack.Redis.Tests/CustomCommandTests.Async.cs new file mode 100644 index 00000000..3033ed5c --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/CustomCommandTests.Async.cs @@ -0,0 +1,140 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Text; +using System; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class CustomCommandTestsAsync + : RedisClientTestsBaseAsync + { + [Test] + public async Task Can_send_custom_commands() + { + await RedisAsync.FlushAllAsync(); + + RedisText ret; + + ret = await RedisAsync.CustomAsync("SET", "foo", 1); + Assert.That(ret.Text, Is.EqualTo("OK")); + _ = await RedisAsync.CustomAsync(Commands.Set, "bar", "b"); + + ret = await RedisAsync.CustomAsync("GET", "foo"); + Assert.That(ret.Text, Is.EqualTo("1")); + ret = await RedisAsync.CustomAsync(Commands.Get, "bar"); + Assert.That(ret.Text, Is.EqualTo("b")); + + ret = await RedisAsync.CustomAsync(Commands.Keys, "*"); + var keys = ret.GetResults(); + Assert.That(keys, Is.EquivalentTo(new[] { "foo", "bar" })); + + ret = await RedisAsync.CustomAsync("MGET", "foo", "bar"); + var values = ret.GetResults(); + Assert.That(values, Is.EquivalentTo(new[] { "1", "b" })); + + foreach (var x in Enum.GetNames(typeof(DayOfWeek))) + { + await RedisAsync.CustomAsync("RPUSH", "DaysOfWeek", x); + } + + ret = await RedisAsync.CustomAsync("LRANGE", "DaysOfWeek", 1, -2); + + var weekDays = ret.GetResults(); + 
Assert.That(weekDays, Is.EquivalentTo( + new[] { "Monday", "Tuesday", "Wednesday", "Thursday", "Friday" })); + + ret.PrintDump(); + } + + [Test] + public async Task Can_send_custom_commands_longhand() + { + await RedisAsync.FlushAllAsync(); + + RedisText ret; + + ret = await RedisAsync.CustomAsync(new object[] { "SET", "foo", 1 }); + Assert.That(ret.Text, Is.EqualTo("OK")); + _ = await RedisAsync.CustomAsync(new object[] { Commands.Set, "bar", "b" }); + + ret = await RedisAsync.CustomAsync(new object[] { "GET", "foo" }); + Assert.That(ret.Text, Is.EqualTo("1")); + ret = await RedisAsync.CustomAsync(new object[] { Commands.Get, "bar" }); + Assert.That(ret.Text, Is.EqualTo("b")); + + ret = await RedisAsync.CustomAsync(new object[] { Commands.Keys, "*" }); + var keys = ret.GetResults(); + Assert.That(keys, Is.EquivalentTo(new[] { "foo", "bar" })); + + ret = await RedisAsync.CustomAsync(new object[] { "MGET", "foo", "bar" }); + var values = ret.GetResults(); + Assert.That(values, Is.EquivalentTo(new[] { "1", "b" })); + + foreach (var x in Enum.GetNames(typeof(DayOfWeek))) + { + await RedisAsync.CustomAsync(new object[] { "RPUSH", "DaysOfWeek", x }); + } + + ret = await RedisAsync.CustomAsync(new object[] { "LRANGE", "DaysOfWeek", 1, -2 }); + + var weekDays = ret.GetResults(); + Assert.That(weekDays, Is.EquivalentTo( + new[] { "Monday", "Tuesday", "Wednesday", "Thursday", "Friday" })); + + ret.PrintDump(); + } + + [Test] + public async Task Can_send_complex_types_in_Custom_Commands() + { + await RedisAsync.FlushAllAsync(); + + RedisText ret; + + ret = await RedisAsync.CustomAsync("SET", "foo", new Poco { Name = "Bar" }); + Assert.That(ret.Text, Is.EqualTo("OK")); + + ret = await RedisAsync.CustomAsync("GET", "foo"); + var dto = ret.GetResult(); + Assert.That(dto.Name, Is.EqualTo("Bar")); + + foreach (var x in Enum.GetNames(typeof(DayOfWeek))) + await RedisAsync.CustomAsync("RPUSH", "DaysOfWeek", new Poco { Name = x }); + + ret = await RedisAsync.CustomAsync("LRANGE", "DaysOfWeek", 1, -2); + var weekDays = ret.GetResults(); + + Assert.That(weekDays.First().Name, Is.EqualTo("Monday")); + + ret.PrintDump(); + } + + [Test] + public async Task Can_send_complex_types_in_Custom_Commands_longhand() + { + await RedisAsync.FlushAllAsync(); + + RedisText ret; + + ret = await RedisAsync.CustomAsync(new object[] { "SET", "foo", new Poco { Name = "Bar" } }); + Assert.That(ret.Text, Is.EqualTo("OK")); + + ret = await RedisAsync.CustomAsync(new object[] { "GET", "foo" }); + var dto = ret.GetResult(); + Assert.That(dto.Name, Is.EqualTo("Bar")); + + foreach (var x in Enum.GetNames(typeof(DayOfWeek))) + await RedisAsync.CustomAsync(new object[] { "RPUSH", "DaysOfWeek", new Poco { Name = x } }); + + ret = await RedisAsync.CustomAsync(new object[] { "LRANGE", "DaysOfWeek", 1, -2 }); + var weekDays = ret.GetResults(); + + Assert.That(weekDays.First().Name, Is.EqualTo("Monday")); + + ret.PrintDump(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/CustomCommandTests.cs b/tests/ServiceStack.Redis.Tests/CustomCommandTests.cs new file mode 100644 index 00000000..1a760461 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/CustomCommandTests.cs @@ -0,0 +1,75 @@ +using System; +using System.Linq; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Tests; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + [TestFixture] + public class CustomCommandTests + : RedisClientTestsBase + { + [Test] + public void Can_send_custom_commands() + { + 
Redis.FlushAll(); + + RedisText ret; + + ret = Redis.Custom("SET", "foo", 1); + Assert.That(ret.Text, Is.EqualTo("OK")); + ret = Redis.Custom(Commands.Set, "bar", "b"); + + ret = Redis.Custom("GET", "foo"); + Assert.That(ret.Text, Is.EqualTo("1")); + ret = Redis.Custom(Commands.Get, "bar"); + Assert.That(ret.Text, Is.EqualTo("b")); + + ret = Redis.Custom(Commands.Keys, "*"); + var keys = ret.GetResults(); + Assert.That(keys, Is.EquivalentTo(new[] { "foo", "bar" })); + + ret = Redis.Custom("MGET", "foo", "bar"); + var values = ret.GetResults(); + Assert.That(values, Is.EquivalentTo(new[] { "1", "b" })); + + Enum.GetNames(typeof(DayOfWeek)).ToList() + .ForEach(x => Redis.Custom("RPUSH", "DaysOfWeek", x)); + + ret = Redis.Custom("LRANGE", "DaysOfWeek", 1, -2); + + var weekDays = ret.GetResults(); + Assert.That(weekDays, Is.EquivalentTo( + new[] { "Monday", "Tuesday", "Wednesday", "Thursday", "Friday" })); + + ret.PrintDump(); + } + + [Test] + public void Can_send_complex_types_in_Custom_Commands() + { + Redis.FlushAll(); + + RedisText ret; + + ret = Redis.Custom("SET", "foo", new Poco { Name = "Bar" }); + Assert.That(ret.Text, Is.EqualTo("OK")); + + ret = Redis.Custom("GET", "foo"); + var dto = ret.GetResult(); + Assert.That(dto.Name, Is.EqualTo("Bar")); + + Enum.GetNames(typeof(DayOfWeek)).ToList() + .ForEach(x => Redis.Custom("RPUSH", "DaysOfWeek", new Poco { Name = x })); + + ret = Redis.Custom("LRANGE", "DaysOfWeek", 1, -2); + var weekDays = ret.GetResults(); + + Assert.That(weekDays.First().Name, Is.EqualTo("Monday")); + + ret.PrintDump(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/DiagnosticTests.cs b/tests/ServiceStack.Redis.Tests/DiagnosticTests.cs index aedcd265..5d511398 100644 --- a/tests/ServiceStack.Redis.Tests/DiagnosticTests.cs +++ b/tests/ServiceStack.Redis.Tests/DiagnosticTests.cs @@ -1,12 +1,11 @@ using System; using System.Diagnostics; using NUnit.Framework; -using ServiceStack.Common; using ServiceStack.Text; namespace ServiceStack.Redis.Tests { - [Explicit("Diagnostic only Integration Test")] + [Ignore("Diagnostic only Integration Test")] [TestFixture] public class DiagnosticTests { @@ -42,17 +41,17 @@ public void Test_Throughput() Count.Times(x => { var sw = Stopwatch.StartNew(); - + redisClient.Set(key, bytes); bytesSent += bytes.Length; "SEND {0} bytes in {1}ms".Print(bytes.Length, sw.ElapsedMilliseconds); - + sw.Reset(); sw.Start(); var receivedBytes = redisClient.Get(key); bytesRecv += receivedBytes.Length; "RECV {0} bytes in {1}ms".Print(receivedBytes.Length, sw.ElapsedMilliseconds); - + "TOTAL {0} bytes SENT {0} RECV {1} in {2}ms\n".Print( bytesSent, bytesRecv, swTotal.ElapsedMilliseconds); }); diff --git a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostBestPractice.cs b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostBestPractice.cs index 5b294760..1cfa8e05 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostBestPractice.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostBestPractice.cs @@ -4,374 +4,356 @@ using System.Linq; using NUnit.Framework; using ServiceStack.Common; -using ServiceStack.Common.Extensions; using ServiceStack.Text; namespace ServiceStack.Redis.Tests.Examples.BestPractice { - /// - /// A complete, self-contained example showing how to create a basic blog application using Redis. 
- /// - - #region Blog Models - - public class User - : IHasBlogRepository - { - public IBlogRepository Repository { private get; set; } - - public User() - { - this.BlogIds = new List(); - } - - public long Id { get; set; } - public string Name { get; set; } - public List BlogIds { get; set; } - - public List GetBlogs() - { - return this.Repository.GetBlogs(this.BlogIds); - } - - public Blog CreateNewBlog(Blog blog) - { - this.Repository.StoreBlogs(this, blog); - - return blog; - } - } - - public class Blog - : IHasBlogRepository - { - public IBlogRepository Repository { private get; set; } - - public Blog() - { - this.Tags = new List(); - this.BlogPostIds = new List(); - } - - public long Id { get; set; } - public long UserId { get; set; } - public string UserName { get; set; } - public List Tags { get; set; } - public List BlogPostIds { get; set; } - - public List GetBlogPosts() - { - return this.Repository.GetBlogPosts(this.BlogPostIds); - } - - public void StoreNewBlogPosts(params BlogPost[] blogPosts) - { - this.Repository.StoreNewBlogPosts(this, blogPosts); - } - } - - public class BlogPost - { - public BlogPost() - { - this.Categories = new List(); - this.Tags = new List(); - this.Comments = new List(); - } - - public long Id { get; set; } - public long BlogId { get; set; } - public string Title { get; set; } - public string Content { get; set; } - public List Categories { get; set; } - public List Tags { get; set; } - public List Comments { get; set; } - } - - public class BlogPostComment - { - public string Content { get; set; } - public DateTime CreatedDate { get; set; } - } - #endregion - - - #region Blog Repository - public interface IHasBlogRepository - { - IBlogRepository Repository { set; } - } - - public class BlogRepository - : IBlogRepository - { - const string CategoryTypeName = "Category"; - const string TagCloudKey = "urn:TagCloud"; - const string AllCategoriesKey = "urn:Categories"; - const string RecentBlogPostsKey = "urn:BlogPosts:RecentPosts"; - const string RecentBlogPostCommentsKey = "urn:BlogPostComment:RecentComments"; - - public BlogRepository(IRedisClient client) - { - this.redisClient = client; - } - - private readonly IRedisClient redisClient; - - public void StoreUsers(params User[] users) - { - using (var userClient = redisClient.GetTypedClient()) - { - Inject(users); - users.Where(x => x.Id == default(int)) - .ForEach(x => x.Id = userClient.GetNextSequence()); - - userClient.StoreAll(users); - } - } - - public List GetAllUsers() - { - using (var userClient = redisClient.GetTypedClient()) - { - return Inject(userClient.GetAll()); - } - } - - public void StoreBlogs(User user, params Blog[] blogs) - { - using (var redisBlogs = redisClient.GetTypedClient()) - { - foreach (var blog in blogs) - { - blog.Id = blog.Id != default(int) ? 
blog.Id : redisBlogs.GetNextSequence(); - blog.UserId = user.Id; - blog.UserName = user.Name; - - user.BlogIds.AddIfNotExists(blog.Id); - } - - using (var trans = redisClient.CreateTransaction()) - { - trans.QueueCommand(x => x.Store(user)); - trans.QueueCommand(x => x.StoreAll(blogs)); - - trans.Commit(); - } - - Inject(blogs); - } - } - - public List GetBlogs(IEnumerable blogIds) - { - using (var redisBlogs = redisClient.GetTypedClient()) - { - return Inject( - redisBlogs.GetByIds(blogIds.ConvertAll(x => x.ToString()))); - } - } - - public List GetAllBlogs() - { - using (var redisBlogs = redisClient.GetTypedClient()) - { - return Inject(redisBlogs.GetAll()); - } - } - - public List GetBlogPosts(IEnumerable blogPostIds) - { - using (var redisBlogPosts = redisClient.GetTypedClient()) - { - return redisBlogPosts.GetByIds(blogPostIds.ConvertAll(x => x.ToString())).ToList(); - } - } - - public void StoreNewBlogPosts(Blog blog, params BlogPost[] blogPosts) - { - using (var redisBlogPosts = redisClient.GetTypedClient()) - using (var redisComments = redisClient.GetTypedClient()) - { - //Get wrapper around a strongly-typed Redis server-side List - var recentPosts = redisBlogPosts.Lists[RecentBlogPostsKey]; - var recentComments = redisComments.Lists[RecentBlogPostCommentsKey]; - - foreach (var blogPost in blogPosts) - { - blogPost.Id = blogPost.Id != default(int) ? blogPost.Id : redisBlogPosts.GetNextSequence(); - blogPost.BlogId = blog.Id; - blog.BlogPostIds.AddIfNotExists(blogPost.Id); - - //List of Recent Posts and comments - recentPosts.Prepend(blogPost); - blogPost.Comments.ForEach(recentComments.Prepend); - - //Tag Cloud - blogPost.Tags.ForEach(x => - redisClient.IncrementItemInSortedSet(TagCloudKey, x, 1)); - - //List of all post categories - blogPost.Categories.ForEach(x => - redisClient.AddItemToSet(AllCategoriesKey, x)); - - //Map of Categories to BlogPost Ids - blogPost.Categories.ForEach(x => - redisClient.AddItemToSet(UrnId.Create(CategoryTypeName, x), blogPost.Id.ToString())); - } - - //Rolling list of recent items, only keep the last 5 - recentPosts.Trim(0, 4); - recentComments.Trim(0, 4); - - using (var trans = redisClient.CreateTransaction()) - { - trans.QueueCommand(x => x.Store(blog)); - trans.QueueCommand(x => x.StoreAll(blogPosts)); - - trans.Commit(); - } - } - } - - public List GetRecentBlogPosts() - { - using (var redisBlogPosts = redisClient.GetTypedClient()) - { - return redisBlogPosts.Lists[RecentBlogPostsKey].GetAll(); - } - } - - public List GetRecentBlogPostComments() - { - using (var redisComments = redisClient.GetTypedClient()) - { - return redisComments.Lists[RecentBlogPostCommentsKey].GetAll(); - } - } - - public IDictionary GetTopTags(int take) - { - return redisClient.GetRangeWithScoresFromSortedSetDesc(TagCloudKey, 0, take - 1); - } - - public HashSet GetAllCategories() - { - return redisClient.GetAllItemsFromSet(AllCategoriesKey); - } - - public void StoreBlogPost(BlogPost blogPost) - { - redisClient.Store(blogPost); - } - - public BlogPost GetBlogPost(int postId) - { - return redisClient.GetById(postId); - } - - public List GetBlogPostsByCategory(string categoryName) - { - var categoryUrn = UrnId.Create(CategoryTypeName, categoryName); - var documentDbPostIds = redisClient.GetAllItemsFromSet(categoryUrn); - - return redisClient.GetByIds(documentDbPostIds.ToArray()).ToList(); - } - - public List Inject(IEnumerable entities) - where T : IHasBlogRepository - { - var entitiesList = entities.ToList(); - entitiesList.ForEach(x => x.Repository = this); - return 
entitiesList; - } - - } - #endregion - - - [TestFixture, Explicit, Category("Integration")] - public class BlogPostBestPractice - { - readonly RedisClient redisClient = new RedisClient(TestConfig.SingleHost); - private IBlogRepository repository; - - [SetUp] - public void OnBeforeEachTest() - { - redisClient.FlushAll(); - repository = new BlogRepository(redisClient); - - InsertTestData(repository); - } - - public static void InsertTestData(IBlogRepository repository) - { - var ayende = new User { Name = "ayende" }; - var mythz = new User { Name = "mythz" }; - - repository.StoreUsers(ayende, mythz); - - var ayendeBlog = ayende.CreateNewBlog(new Blog { Tags = { "Architecture", ".NET", "Databases" } }); - - var mythzBlog = mythz.CreateNewBlog(new Blog { Tags = { "Architecture", ".NET", "Databases" }}); - - ayendeBlog.StoreNewBlogPosts(new BlogPost - { - Title = "RavenDB", - Categories = new List { "NoSQL", "DocumentDB" }, - Tags = new List { "Raven", "NoSQL", "JSON", ".NET" }, - Comments = new List - { - new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,}, - new BlogPostComment { Content = "Second Comment!", CreatedDate = DateTime.UtcNow,}, - } - }, - new BlogPost - { - BlogId = ayendeBlog.Id, - Title = "Cassandra", - Categories = new List { "NoSQL", "Cluster" }, - Tags = new List { "Cassandra", "NoSQL", "Scalability", "Hashing" }, - Comments = new List - { - new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,} - } - }); - - mythzBlog.StoreNewBlogPosts( - new BlogPost - { - Title = "Redis", - Categories = new List { "NoSQL", "Cache" }, - Tags = new List { "Redis", "NoSQL", "Scalability", "Performance" }, - Comments = new List - { - new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,} - } - }, - new BlogPost - { - Title = "Couch Db", - Categories = new List { "NoSQL", "DocumentDB" }, - Tags = new List { "CouchDb", "NoSQL", "JSON" }, - Comments = new List - { - new BlogPostComment {Content = "First Comment!", CreatedDate = DateTime.UtcNow,} - } - }); - } - - [Test] - public void View_test_data() - { - var mythz = repository.GetAllUsers().First(x => x.Name == "mythz"); - var mythzBlogPostIds = mythz.GetBlogs().SelectMany(x => x.BlogPostIds); - var mythzBlogPosts = repository.GetBlogPosts(mythzBlogPostIds); - - Debug.WriteLine(mythzBlogPosts.Dump()); - /* Output: + /// + /// A complete, self-contained example showing how to create a basic blog application using Redis. 
+ /// + + #region Blog Models + + public class User + : IHasBlogRepository + { + public IBlogRepository Repository { private get; set; } + + public User() + { + this.BlogIds = new List(); + } + + public long Id { get; set; } + public string Name { get; set; } + public List BlogIds { get; set; } + + public List GetBlogs() + { + return this.Repository.GetBlogs(this.BlogIds); + } + + public Blog CreateNewBlog(Blog blog) + { + this.Repository.StoreBlogs(this, blog); + + return blog; + } + } + + public class Blog + : IHasBlogRepository + { + public IBlogRepository Repository { private get; set; } + + public Blog() + { + this.Tags = new List(); + this.BlogPostIds = new List(); + } + + public long Id { get; set; } + public long UserId { get; set; } + public string UserName { get; set; } + public List Tags { get; set; } + public List BlogPostIds { get; set; } + + public List GetBlogPosts() + { + return this.Repository.GetBlogPosts(this.BlogPostIds); + } + + public void StoreNewBlogPosts(params BlogPost[] blogPosts) + { + this.Repository.StoreNewBlogPosts(this, blogPosts); + } + } + + public class BlogPost + { + public BlogPost() + { + this.Categories = new List(); + this.Tags = new List(); + this.Comments = new List(); + } + + public long Id { get; set; } + public long BlogId { get; set; } + public string Title { get; set; } + public string Content { get; set; } + public List Categories { get; set; } + public List Tags { get; set; } + public List Comments { get; set; } + } + + public class BlogPostComment + { + public string Content { get; set; } + public DateTime CreatedDate { get; set; } + } + #endregion + + + #region Blog Repository + public interface IHasBlogRepository + { + IBlogRepository Repository { set; } + } + + public class BlogRepository + : IBlogRepository + { + const string CategoryTypeName = "Category"; + const string TagCloudKey = "urn:TagCloud"; + const string AllCategoriesKey = "urn:Categories"; + const string RecentBlogPostsKey = "urn:BlogPosts:RecentPosts"; + const string RecentBlogPostCommentsKey = "urn:BlogPostComment:RecentComments"; + + public BlogRepository(IRedisClient client) + { + this.redis = client; + } + + private readonly IRedisClient redis; + + public void StoreUsers(params User[] users) + { + var redisUsers = redis.As(); + Inject(users); + users.Where(x => x.Id == default(int)) + .Each(x => x.Id = redisUsers.GetNextSequence()); + + redisUsers.StoreAll(users); + } + + public List GetAllUsers() + { + var redisUsers = redis.As(); + return Inject(redisUsers.GetAll()); + } + + public void StoreBlogs(User user, params Blog[] blogs) + { + var redisBlogs = redis.As(); + foreach (var blog in blogs) + { + blog.Id = blog.Id != default(int) ? 
blog.Id : redisBlogs.GetNextSequence(); + blog.UserId = user.Id; + blog.UserName = user.Name; + + user.BlogIds.AddIfNotExists(blog.Id); + } + + using (var trans = redis.CreateTransaction()) + { + trans.QueueCommand(x => x.Store(user)); + trans.QueueCommand(x => x.StoreAll(blogs)); + + trans.Commit(); + } + + Inject(blogs); + } + + public List GetBlogs(IEnumerable blogIds) + { + var redisBlogs = redis.As(); + return Inject( + redisBlogs.GetByIds(blogIds.Map(x => x.ToString()))); + } + + public List GetAllBlogs() + { + var redisBlogs = redis.As(); + return Inject(redisBlogs.GetAll()); + } + + public List GetBlogPosts(IEnumerable blogPostIds) + { + var redisBlogPosts = redis.As(); + return redisBlogPosts.GetByIds(blogPostIds.Map(x => x.ToString())).ToList(); + } + + public void StoreNewBlogPosts(Blog blog, params BlogPost[] blogPosts) + { + var redisBlogPosts = redis.As(); + var redisComments = redis.As(); + + //Get wrapper around a strongly-typed Redis server-side List + var recentPosts = redisBlogPosts.Lists[RecentBlogPostsKey]; + var recentComments = redisComments.Lists[RecentBlogPostCommentsKey]; + + foreach (var blogPost in blogPosts) + { + blogPost.Id = blogPost.Id != default(int) ? blogPost.Id : redisBlogPosts.GetNextSequence(); + blogPost.BlogId = blog.Id; + blog.BlogPostIds.AddIfNotExists(blogPost.Id); + + //List of Recent Posts and comments + recentPosts.Prepend(blogPost); + blogPost.Comments.ForEach(recentComments.Prepend); + + //Tag Cloud + blogPost.Tags.ForEach(x => + redis.IncrementItemInSortedSet(TagCloudKey, x, 1)); + + //List of all post categories + blogPost.Categories.ForEach(x => + redis.AddItemToSet(AllCategoriesKey, x)); + + //Map of Categories to BlogPost Ids + blogPost.Categories.ForEach(x => + redis.AddItemToSet(UrnId.Create(CategoryTypeName, x), blogPost.Id.ToString())); + } + + //Rolling list of recent items, only keep the last 5 + recentPosts.Trim(0, 4); + recentComments.Trim(0, 4); + + using (var trans = redis.CreateTransaction()) + { + trans.QueueCommand(x => x.Store(blog)); + trans.QueueCommand(x => x.StoreAll(blogPosts)); + + trans.Commit(); + } + } + + public List GetRecentBlogPosts() + { + var redisBlogPosts = redis.As(); + return redisBlogPosts.Lists[RecentBlogPostsKey].GetAll(); + } + + public List GetRecentBlogPostComments() + { + var redisComments = redis.As(); + return redisComments.Lists[RecentBlogPostCommentsKey].GetAll(); + } + + public IDictionary GetTopTags(int take) + { + return redis.GetRangeWithScoresFromSortedSetDesc(TagCloudKey, 0, take - 1); + } + + public HashSet GetAllCategories() + { + return redis.GetAllItemsFromSet(AllCategoriesKey); + } + + public void StoreBlogPost(BlogPost blogPost) + { + redis.Store(blogPost); + } + + public BlogPost GetBlogPost(int postId) + { + return redis.GetById(postId); + } + + public List GetBlogPostsByCategory(string categoryName) + { + var categoryUrn = UrnId.Create(CategoryTypeName, categoryName); + var documentDbPostIds = redis.GetAllItemsFromSet(categoryUrn); + + return redis.GetByIds(documentDbPostIds.ToArray()).ToList(); + } + + public List Inject(IEnumerable entities) + where T : IHasBlogRepository + { + var entitiesList = entities.ToList(); + entitiesList.ForEach(x => x.Repository = this); + return entitiesList; + } + + } + #endregion + + + [TestFixture, Ignore("Integration"), Category("Integration")] + public class BlogPostBestPractice + { + readonly RedisClient redisClient = new RedisClient(TestConfig.SingleHost); + private IBlogRepository repository; + + [SetUp] + public void OnBeforeEachTest() + { 
+ redisClient.FlushAll(); + repository = new BlogRepository(redisClient); + + InsertTestData(repository); + } + + public static void InsertTestData(IBlogRepository repository) + { + var ayende = new User { Name = "ayende" }; + var mythz = new User { Name = "mythz" }; + + repository.StoreUsers(ayende, mythz); + + var ayendeBlog = ayende.CreateNewBlog(new Blog { Tags = { "Architecture", ".NET", "Databases" } }); + + var mythzBlog = mythz.CreateNewBlog(new Blog { Tags = { "Architecture", ".NET", "Databases" } }); + + ayendeBlog.StoreNewBlogPosts(new BlogPost + { + Title = "RavenDB", + Categories = new List { "NoSQL", "DocumentDB" }, + Tags = new List { "Raven", "NoSQL", "JSON", ".NET" }, + Comments = new List + { + new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,}, + new BlogPostComment { Content = "Second Comment!", CreatedDate = DateTime.UtcNow,}, + } + }, + new BlogPost + { + BlogId = ayendeBlog.Id, + Title = "Cassandra", + Categories = new List { "NoSQL", "Cluster" }, + Tags = new List { "Cassandra", "NoSQL", "Scalability", "Hashing" }, + Comments = new List + { + new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,} + } + }); + + mythzBlog.StoreNewBlogPosts( + new BlogPost + { + Title = "Redis", + Categories = new List { "NoSQL", "Cache" }, + Tags = new List { "Redis", "NoSQL", "Scalability", "Performance" }, + Comments = new List + { + new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,} + } + }, + new BlogPost + { + Title = "Couch Db", + Categories = new List { "NoSQL", "DocumentDB" }, + Tags = new List { "CouchDb", "NoSQL", "JSON" }, + Comments = new List + { + new BlogPostComment {Content = "First Comment!", CreatedDate = DateTime.UtcNow,} + } + }); + } + + [Test] + public void View_test_data() + { + var mythz = repository.GetAllUsers().First(x => x.Name == "mythz"); + var mythzBlogPostIds = mythz.GetBlogs().SelectMany(x => x.BlogPostIds); + var mythzBlogPosts = repository.GetBlogPosts(mythzBlogPostIds); + + Debug.WriteLine(mythzBlogPosts.Dump()); + /* Output: [ { Id: 3, @@ -422,14 +404,14 @@ public void View_test_data() } ] */ - } - - [Test] - public void Show_a_list_of_blogs() - { - var blogs = repository.GetAllBlogs(); - Debug.WriteLine(blogs.Dump()); - /* Output: + } + + [Test] + public void Show_a_list_of_blogs() + { + var blogs = repository.GetAllBlogs(); + Debug.WriteLine(blogs.Dump()); + /* Output: [ { Id: 1, @@ -465,18 +447,18 @@ public void Show_a_list_of_blogs() } ] */ - } - - [Test] - public void Show_a_list_of_recent_posts_and_comments() - { - //Recent posts are already maintained in the repository - var recentPosts = repository.GetRecentBlogPosts(); - var recentComments = repository.GetRecentBlogPostComments(); - - Debug.WriteLine("Recent Posts:\n" + recentPosts.Dump()); - Debug.WriteLine("Recent Comments:\n" + recentComments.Dump()); - /* + } + + [Test] + public void Show_a_list_of_recent_posts_and_comments() + { + //Recent posts are already maintained in the repository + var recentPosts = repository.GetRecentBlogPosts(); + var recentComments = repository.GetRecentBlogPostComments(); + + Debug.WriteLine("Recent Posts:\n" + recentPosts.Dump()); + Debug.WriteLine("Recent Comments:\n" + recentComments.Dump()); + /* Recent Posts: [ { @@ -605,15 +587,15 @@ public void Show_a_list_of_recent_posts_and_comments() ] */ - } - - [Test] - public void Show_a_TagCloud() - { - //Tags are maintained in the repository - var tagCloud = repository.GetTopTags(5); - Debug.WriteLine(tagCloud.Dump()); - 
/* Output: + } + + [Test] + public void Show_a_TagCloud() + { + //Tags are maintained in the repository + var tagCloud = repository.GetTopTags(5); + Debug.WriteLine(tagCloud.Dump()); + /* Output: [ [ NoSQL, @@ -637,15 +619,15 @@ public void Show_a_TagCloud() ] ] */ - } - - [Test] - public void Show_all_Categories() - { - //Categories are maintained in the repository - var allCategories = repository.GetAllCategories(); - Debug.WriteLine(allCategories.Dump()); - /* Output: + } + + [Test] + public void Show_all_Categories() + { + //Categories are maintained in the repository + var allCategories = repository.GetAllCategories(); + Debug.WriteLine(allCategories.Dump()); + /* Output: [ DocumentDB, NoSQL, @@ -653,15 +635,15 @@ public void Show_all_Categories() Cache ] */ - } - - [Test] - public void Show_post_and_all_comments() - { - var postId = 1; - var blogPost = repository.GetBlogPost(postId); - Debug.WriteLine(blogPost.Dump()); - /* Output: + } + + [Test] + public void Show_post_and_all_comments() + { + var postId = 1; + var blogPost = repository.GetBlogPost(postId); + Debug.WriteLine(blogPost.Dump()); + /* Output: { Id: 1, BlogId: 1, @@ -691,22 +673,22 @@ public void Show_post_and_all_comments() ] } */ - } - - [Test] - public void Add_comment_to_existing_post() - { - var postId = 1; - var blogPost = repository.GetBlogPost(postId); - - blogPost.Comments.Add( - new BlogPostComment { Content = "Third Comment!", CreatedDate = DateTime.UtcNow }); - - repository.StoreBlogPost(blogPost); - - var refreshBlogPost = repository.GetBlogPost(postId); - Debug.WriteLine(refreshBlogPost.Dump()); - /* Output: + } + + [Test] + public void Add_comment_to_existing_post() + { + var postId = 1; + var blogPost = repository.GetBlogPost(postId); + + blogPost.Comments.Add( + new BlogPostComment { Content = "Third Comment!", CreatedDate = DateTime.UtcNow }); + + repository.StoreBlogPost(blogPost); + + var refreshBlogPost = repository.GetBlogPost(postId); + Debug.WriteLine(refreshBlogPost.Dump()); + /* Output: { Id: 1, BlogId: 1, @@ -740,14 +722,14 @@ public void Add_comment_to_existing_post() ] } */ - } - - [Test] - public void Show_all_Posts_for_a_Category() - { - var documentDbPosts = repository.GetBlogPostsByCategory("DocumentDB"); - Debug.WriteLine(documentDbPosts.Dump()); - /* Output: + } + + [Test] + public void Show_all_Posts_for_a_Category() + { + var documentDbPosts = repository.GetBlogPostsByCategory("DocumentDB"); + Debug.WriteLine(documentDbPosts.Dump()); + /* Output: [ { Id: 4, @@ -802,7 +784,7 @@ public void Show_all_Posts_for_a_Category() } ] */ - } + } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostMigrations.cs b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostMigrations.cs index c67a94b8..d868ccd2 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostMigrations.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostMigrations.cs @@ -2,7 +2,7 @@ using System.Collections.Generic; using System.Diagnostics; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; using ServiceStack.Text; //Shows how to easily migrated data from an old version of the schema to a new one @@ -10,323 +10,317 @@ //New schema types used in this example namespace ServiceStack.Redis.Tests.Examples.BestPractice.New { - public class BlogPost - { - public BlogPost() - { - this.Labels = new List(); - this.Tags = new HashSet(); - this.Comments = new List>(); - } + public class BlogPost + { 
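// Note on the migration below: ServiceStack.Text's JSON serializer matches fields by name and
// ignores anything it doesn't recognise, so old BlogPost payloads already stored in Redis can be
// deserialized straight into this New.BlogPost shape - renamed fields (Categories -> Labels) come
// back empty and the widened numeric types (int -> long/double) are coerced, which is exactly
// what the Automatically_migrate_to_new_Schema test relies on.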
+ public BlogPost() + { + this.Labels = new List(); + this.Tags = new HashSet(); + this.Comments = new List>(); + } - //Changed int types to both a long and a double type - public long Id { get; set; } - public double BlogId { get; set; } + //Changed int types to both a long and a double type + public long Id { get; set; } + public double BlogId { get; set; } - //Added new field - public BlogPostType PostType { get; set; } + //Added new field + public BlogPostType PostType { get; set; } - public string Title { get; set; } - public string Content { get; set; } + public string Title { get; set; } + public string Content { get; set; } - //Renamed from 'Categories' to 'Labels' - public List Labels { get; set; } + //Renamed from 'Categories' to 'Labels' + public List Labels { get; set; } - //Changed from List to a HashSet - public HashSet Tags { get; set; } + //Changed from List to a HashSet + public HashSet Tags { get; set; } - //Changed from List of strongly-typed 'BlogPostComment' to loosely-typed string map - public List> Comments { get; set; } + //Changed from List of strongly-typed 'BlogPostComment' to loosely-typed string map + public List> Comments { get; set; } - //Added pointless calculated field - public int? NoOfComments { get; set; } - } + //Added pointless calculated field + public int? NoOfComments { get; set; } + } - public enum BlogPostType - { - None, - Article, - Summary, - } + public enum BlogPostType + { + None, + Article, + Summary, + } } namespace ServiceStack.Redis.Tests.Examples.BestPractice { - [TestFixture, Explicit, Category("Integration")] - public class BlogPostMigrations - { - readonly RedisClient redisClient = new RedisClient(TestConfig.SingleHost); + [TestFixture, Ignore("Integration"), Category("Integration")] + public class BlogPostMigrations + { + readonly RedisClient redisClient = new RedisClient(TestConfig.SingleHost); - [SetUp] - public void OnBeforeEachTest() - { - redisClient.FlushAll(); - } + [SetUp] + public void OnBeforeEachTest() + { + redisClient.FlushAll(); + } - [Test] - public void Automatically_migrate_to_new_Schema() - { - var repository = new BlogRepository(redisClient); + [Test] + public void Automatically_migrate_to_new_Schema() + { + var repository = new BlogRepository(redisClient); - //Populate the datastore with the old schema from the 'BlogPostBestPractice' - BlogPostBestPractice.InsertTestData(repository); + //Populate the datastore with the old schema from the 'BlogPostBestPractice' + BlogPostBestPractice.InsertTestData(repository); - //Create a typed-client based on the new schema - using (var redisBlogPosts = redisClient.GetTypedClient()) - { - //Automatically retrieve blog posts - IList allBlogPosts = redisBlogPosts.GetAll(); + //Create a typed-client based on the new schema + var redisBlogPosts = redisClient.As(); + //Automatically retrieve blog posts + IList allBlogPosts = redisBlogPosts.GetAll(); - //Print out the data in the list of 'New.BlogPost' populated from old 'BlogPost' type - //Note: renamed fields are lost - Debug.WriteLine(allBlogPosts.Dump()); - /*Output: - [ - { - Id: 3, - BlogId: 2, - PostType: None, - Title: Redis, - Labels: [], - Tags: - [ - Redis, - NoSQL, - Scalability, - Performance - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-28T21:42:03.9484725Z - } - ] - }, - { - Id: 4, - BlogId: 2, - PostType: None, - Title: Couch Db, - Labels: [], - Tags: - [ - CouchDb, - NoSQL, - JSON - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-28T21:42:03.9484725Z - } - ] - }, 
- { - Id: 1, - BlogId: 1, - PostType: None, - Title: RavenDB, - Labels: [], - Tags: - [ - Raven, - NoSQL, - JSON, - .NET - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-28T21:42:03.9004697Z - }, - { - Content: Second Comment!, - CreatedDate: 2010-04-28T21:42:03.9004697Z - } - ] - }, - { - Id: 2, - BlogId: 1, - PostType: None, - Title: Cassandra, - Labels: [], - Tags: - [ - Cassandra, - NoSQL, - Scalability, - Hashing - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-28T21:42:03.9004697Z - } - ] - } - ] + //Print out the data in the list of 'New.BlogPost' populated from old 'BlogPost' type + //Note: renamed fields are lost + allBlogPosts.PrintDump(); + /*Output: + [ + { + Id: 3, + BlogId: 2, + PostType: None, + Title: Redis, + Labels: [], + Tags: + [ + Redis, + NoSQL, + Scalability, + Performance + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-28T21:42:03.9484725Z + } + ] + }, + { + Id: 4, + BlogId: 2, + PostType: None, + Title: Couch Db, + Labels: [], + Tags: + [ + CouchDb, + NoSQL, + JSON + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-28T21:42:03.9484725Z + } + ] + }, + { + Id: 1, + BlogId: 1, + PostType: None, + Title: RavenDB, + Labels: [], + Tags: + [ + Raven, + NoSQL, + JSON, + .NET + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-28T21:42:03.9004697Z + }, + { + Content: Second Comment!, + CreatedDate: 2010-04-28T21:42:03.9004697Z + } + ] + }, + { + Id: 2, + BlogId: 1, + PostType: None, + Title: Cassandra, + Labels: [], + Tags: + [ + Cassandra, + NoSQL, + Scalability, + Hashing + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-28T21:42:03.9004697Z + } + ] + } + ] - */ - } - } + */ + } - [Test] - public void Manually_migrate_to_new_Schema_using_a_custom_tranlation() - { - var repository = new BlogRepository(redisClient); + [Test] + public void Manually_migrate_to_new_Schema_using_a_custom_tranlation() + { + var repository = new BlogRepository(redisClient); - //Populate the datastore with the old schema from the 'BlogPostBestPractice' - BlogPostBestPractice.InsertTestData(repository); + //Populate the datastore with the old schema from the 'BlogPostBestPractice' + BlogPostBestPractice.InsertTestData(repository); - //Create a typed-client based on the new schema - using (var redisBlogPosts = redisClient.GetTypedClient()) - using (var redisNewBlogPosts = redisClient.GetTypedClient()) - { - //Automatically retrieve blog posts - IList oldBlogPosts = redisBlogPosts.GetAll(); + //Create a typed-client based on the new schema + var redisBlogPosts = redisClient.As(); + var redisNewBlogPosts = redisClient.As(); + //Automatically retrieve blog posts + IList oldBlogPosts = redisBlogPosts.GetAll(); - //Write a custom translation layer to migrate to the new schema - var migratedBlogPosts = oldBlogPosts.ConvertAll(old => new New.BlogPost - { - Id = old.Id, - BlogId = old.BlogId, - Title = old.Title, - Content = old.Content, - Labels = old.Categories, //populate with data from renamed field - PostType = New.BlogPostType.Article, //select non-default enum value - Tags = new HashSet(old.Tags), - Comments = old.Comments.ConvertAll(x => new Dictionary - { { "Content", x.Content }, { "CreatedDate", x.CreatedDate.ToString() }, }), - NoOfComments = old.Comments.Count, //populate using logic from old data - }); + //Write a custom translation layer to migrate to the new schema + var migratedBlogPosts = oldBlogPosts.Map(old => new New.BlogPost + { + Id = 
old.Id, + BlogId = old.BlogId, + Title = old.Title, + Content = old.Content, + Labels = old.Categories, //populate with data from renamed field + PostType = New.BlogPostType.Article, //select non-default enum value + Tags = new HashSet(old.Tags), + Comments = old.Comments.ConvertAll(x => new Dictionary { { "Content", x.Content }, { "CreatedDate", x.CreatedDate.ToString() }, }), + NoOfComments = old.Comments.Count, //populate using logic from old data + }); - //Persist the new migrated blogposts - redisNewBlogPosts.StoreAll(migratedBlogPosts); + //Persist the new migrated blogposts + redisNewBlogPosts.StoreAll(migratedBlogPosts); - //Read out the newly stored blogposts - var refreshedNewBlogPosts = redisNewBlogPosts.GetAll(); - //Note: data renamed fields are successfully migrated to the new schema - Debug.WriteLine(refreshedNewBlogPosts.Dump()); - /* - [ - { - Id: 3, - BlogId: 2, - PostType: Article, - Title: Redis, - Labels: - [ - NoSQL, - Cache - ], - Tags: - [ - Redis, - NoSQL, - Scalability, - Performance - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 28/04/2010 22:58:35 - } - ], - NoOfComments: 1 - }, - { - Id: 4, - BlogId: 2, - PostType: Article, - Title: Couch Db, - Labels: - [ - NoSQL, - DocumentDB - ], - Tags: - [ - CouchDb, - NoSQL, - JSON - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 28/04/2010 22:58:35 - } - ], - NoOfComments: 1 - }, - { - Id: 1, - BlogId: 1, - PostType: Article, - Title: RavenDB, - Labels: - [ - NoSQL, - DocumentDB - ], - Tags: - [ - Raven, - NoSQL, - JSON, - .NET - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 28/04/2010 22:58:35 - }, - { - Content: Second Comment!, - CreatedDate: 28/04/2010 22:58:35 - } - ], - NoOfComments: 2 - }, - { - Id: 2, - BlogId: 1, - PostType: Article, - Title: Cassandra, - Labels: - [ - NoSQL, - Cluster - ], - Tags: - [ - Cassandra, - NoSQL, - Scalability, - Hashing - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 28/04/2010 22:58:35 - } - ], - NoOfComments: 1 - } - ] + //Read out the newly stored blogposts + var refreshedNewBlogPosts = redisNewBlogPosts.GetAll(); + //Note: data renamed fields are successfully migrated to the new schema + refreshedNewBlogPosts.PrintDump(); + /* + [ + { + Id: 3, + BlogId: 2, + PostType: Article, + Title: Redis, + Labels: + [ + NoSQL, + Cache + ], + Tags: + [ + Redis, + NoSQL, + Scalability, + Performance + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 28/04/2010 22:58:35 + } + ], + NoOfComments: 1 + }, + { + Id: 4, + BlogId: 2, + PostType: Article, + Title: Couch Db, + Labels: + [ + NoSQL, + DocumentDB + ], + Tags: + [ + CouchDb, + NoSQL, + JSON + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 28/04/2010 22:58:35 + } + ], + NoOfComments: 1 + }, + { + Id: 1, + BlogId: 1, + PostType: Article, + Title: RavenDB, + Labels: + [ + NoSQL, + DocumentDB + ], + Tags: + [ + Raven, + NoSQL, + JSON, + .NET + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 28/04/2010 22:58:35 + }, + { + Content: Second Comment!, + CreatedDate: 28/04/2010 22:58:35 + } + ], + NoOfComments: 2 + }, + { + Id: 2, + BlogId: 1, + PostType: Article, + Title: Cassandra, + Labels: + [ + NoSQL, + Cluster + ], + Tags: + [ + Cassandra, + NoSQL, + Scalability, + Hashing + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 28/04/2010 22:58:35 + } + ], + NoOfComments: 1 + } + ] + */ + } - */ - } - } - - } + } } diff --git a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/IBlogRepository.cs 
b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/IBlogRepository.cs index 41579115..e26cdbc5 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/IBlogRepository.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/IBlogRepository.cs @@ -2,25 +2,25 @@ namespace ServiceStack.Redis.Tests.Examples.BestPractice { - public interface IBlogRepository - { - void StoreUsers(params User[] users); - List GetAllUsers(); + public interface IBlogRepository + { + void StoreUsers(params User[] users); + List GetAllUsers(); - void StoreBlogs(User user, params Blog[] users); - List GetBlogs(IEnumerable blogIds); - List GetAllBlogs(); + void StoreBlogs(User user, params Blog[] users); + List GetBlogs(IEnumerable blogIds); + List GetAllBlogs(); - List GetBlogPosts(IEnumerable blogPostIds); - void StoreNewBlogPosts(Blog blog, params BlogPost[] blogPosts); + List GetBlogPosts(IEnumerable blogPostIds); + void StoreNewBlogPosts(Blog blog, params BlogPost[] blogPosts); - List GetRecentBlogPosts(); - List GetRecentBlogPostComments(); - IDictionary GetTopTags(int take); - HashSet GetAllCategories(); + List GetRecentBlogPosts(); + List GetRecentBlogPostComments(); + IDictionary GetTopTags(int take); + HashSet GetAllCategories(); - void StoreBlogPost(BlogPost blogPost); - BlogPost GetBlogPost(int postId); - List GetBlogPostsByCategory(string categoryName); - } + void StoreBlogPost(BlogPost blogPost); + BlogPost GetBlogPost(int postId); + List GetBlogPostsByCategory(string categoryName); + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Examples/BlogPostExample.cs b/tests/ServiceStack.Redis.Tests/Examples/BlogPostExample.cs index 2776e712..880fdbe7 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/BlogPostExample.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/BlogPostExample.cs @@ -3,589 +3,576 @@ using System.Diagnostics; using System.Linq; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; using ServiceStack.Text; namespace ServiceStack.Redis.Tests.Examples { - /// - /// A complete, self-contained example showing how to create a basic blog application using Redis. 
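For orientation, a minimal sketch of how a couple of the IBlogRepository members above can sit on top of the typed-client API this patch migrates to (redis.As<T>()); the class and method bodies here are an illustrative assumption, not the BlogRepository implementation from the test suite:

public class BlogRepositorySketch
{
    private readonly IRedisClient redis;

    public BlogRepositorySketch(IRedisClient redis)
    {
        this.redis = redis;
    }

    public void StoreUsers(params User[] users)
    {
        var redisUsers = redis.As<User>();
        foreach (var user in users)
        {
            if (user.Id == 0)
                user.Id = redisUsers.GetNextSequence(); // atomic INCR on the per-type sequence key
        }
        redisUsers.StoreAll(users);
    }

    public List<User> GetAllUsers()
    {
        return redis.As<User>().GetAll().ToList(); // requires System.Linq for ToList()
    }
}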
- /// - public class User - { - public User() - { - this.BlogIds = new List(); - } - - public long Id { get; set; } - public string Name { get; set; } - public List BlogIds { get; set; } - } - - public class Blog - { - public Blog() - { - this.Tags = new List(); - this.BlogPostIds = new List(); - } - - public long Id { get; set; } - public long UserId { get; set; } - public string UserName { get; set; } - public List Tags { get; set; } - public List BlogPostIds { get; set; } - } - - public class BlogPost - { - public BlogPost() - { - this.Categories = new List(); - this.Tags = new List(); - this.Comments = new List(); - } - - public long Id { get; set; } - public long BlogId { get; set; } - public string Title { get; set; } - public string Content { get; set; } - public List Categories { get; set; } - public List Tags { get; set; } - public List Comments { get; set; } - } - - public class BlogPostComment - { - public string Content { get; set; } - public DateTime CreatedDate { get; set; } - } - - - [TestFixture, Explicit, Category("Integration")] - public class BlogPostExample - { - readonly RedisClient redisClient = new RedisClient(TestConfig.SingleHost); - - [SetUp] - public void OnBeforeEachTest() - { - redisClient.FlushAll(); - InsertTestData(); - } - - public void InsertTestData() - { - using (var redisUsers = redisClient.GetTypedClient()) - using (var redisBlogs = redisClient.GetTypedClient()) - using (var redisBlogPosts = redisClient.GetTypedClient()) - { - var ayende = new User { Id = redisUsers.GetNextSequence(), Name = "Oren Eini" }; - var mythz = new User { Id = redisUsers.GetNextSequence(), Name = "Demis Bellot" }; - - var ayendeBlog = new Blog - { - Id = redisBlogs.GetNextSequence(), - UserId = ayende.Id, - UserName = ayende.Name, - Tags = new List { "Architecture", ".NET", "Databases" }, - }; - - var mythzBlog = new Blog - { - Id = redisBlogs.GetNextSequence(), - UserId = mythz.Id, - UserName = mythz.Name, - Tags = new List { "Architecture", ".NET", "Databases" }, - }; - - var blogPosts = new List - { - new BlogPost - { - Id = redisBlogPosts.GetNextSequence(), - BlogId = ayendeBlog.Id, - Title = "RavenDB", - Categories = new List { "NoSQL", "DocumentDB" }, - Tags = new List {"Raven", "NoSQL", "JSON", ".NET"} , - Comments = new List - { - new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,}, - new BlogPostComment { Content = "Second Comment!", CreatedDate = DateTime.UtcNow,}, - } - }, - new BlogPost - { - Id = redisBlogPosts.GetNextSequence(), - BlogId = mythzBlog.Id, - Title = "Redis", - Categories = new List { "NoSQL", "Cache" }, - Tags = new List {"Redis", "NoSQL", "Scalability", "Performance"}, - Comments = new List - { - new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,} - } - }, - new BlogPost - { - Id = redisBlogPosts.GetNextSequence(), - BlogId = ayendeBlog.Id, - Title = "Cassandra", - Categories = new List { "NoSQL", "Cluster" }, - Tags = new List {"Cassandra", "NoSQL", "Scalability", "Hashing"}, - Comments = new List - { - new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,} - } - }, - new BlogPost - { - Id = redisBlogPosts.GetNextSequence(), - BlogId = mythzBlog.Id, - Title = "Couch Db", - Categories = new List { "NoSQL", "DocumentDB" }, - Tags = new List {"CouchDb", "NoSQL", "JSON"}, - Comments = new List - { - new BlogPostComment {Content = "First Comment!", CreatedDate = DateTime.UtcNow,} - } - }, - }; - - ayende.BlogIds.Add(ayendeBlog.Id); - 
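// The Blog -> BlogPost relationship is modelled by hand: each Blog keeps the ids of its posts
// (and each User the ids of its Blogs), so reads can later pull the children back in one batch
// with GetByIds, as the DocumentDB-category test at the bottom of this file does.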
ayendeBlog.BlogPostIds.AddRange(blogPosts.Where(x => x.BlogId == ayendeBlog.Id).ConvertAll(x => x.Id)); - - mythz.BlogIds.Add(mythzBlog.Id); - mythzBlog.BlogPostIds.AddRange(blogPosts.Where(x => x.BlogId == mythzBlog.Id).ConvertAll(x => x.Id)); - - redisUsers.Store(ayende); - redisUsers.Store(mythz); - redisBlogs.StoreAll(new[] { ayendeBlog, mythzBlog }); - redisBlogPosts.StoreAll(blogPosts); - } - } - - [Test] - public void Show_a_list_of_blogs() - { - using (var redisBlogs = redisClient.GetTypedClient()) - { - var blogs = redisBlogs.GetAll(); - Debug.WriteLine(blogs.Dump()); - } - /* Output: - [ - { - Id: 1, - UserId: 1, - UserName: Ayende, - Tags: - [ - Architecture, - .NET, - Databases - ], - BlogPostIds: - [ - 1, - 3 - ] - }, - { - Id: 2, - UserId: 2, - UserName: Demis, - Tags: - [ - Architecture, - .NET, - Databases - ], - BlogPostIds: - [ - 2, - 4 - ] - } - ] - */ - } - - [Test] - public void Show_a_list_of_recent_posts_and_comments() - { - //Get strongly-typed clients - using (var redisBlogPosts = redisClient.GetTypedClient()) - using (var redisComments = redisClient.GetTypedClient()) - { - //To keep this example let's pretend this is a new list of blog posts - var newIncomingBlogPosts = redisBlogPosts.GetAll(); - - //Let's get back an IList wrapper around a Redis server-side List. - var recentPosts = redisBlogPosts.Lists["urn:BlogPost:RecentPosts"]; - var recentComments = redisComments.Lists["urn:BlogPostComment:RecentComments"]; - - foreach (var newBlogPost in newIncomingBlogPosts) - { - //Prepend the new blog posts to the start of the 'RecentPosts' list - recentPosts.Prepend(newBlogPost); - - //Prepend all the new blog post comments to the start of the 'RecentComments' list - newBlogPost.Comments.ForEach(recentComments.Prepend); - } - - //Make this a Rolling list by only keep the latest 3 posts and comments - recentPosts.Trim(0, 2); - recentComments.Trim(0, 2); - - //Print out the last 3 posts: - Debug.WriteLine(recentPosts.GetAll().Dump()); - /* Output: - [ - { - Id: 2, - BlogId: 2, - Title: Redis, - Categories: - [ - NoSQL, - Cache - ], - Tags: - [ - Redis, - NoSQL, - Scalability, - Performance - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-20T22:14:02.755878Z - } - ] - }, - { - Id: 1, - BlogId: 1, - Title: RavenDB, - Categories: - [ - NoSQL, - DocumentDB - ], - Tags: - [ - Raven, - NoSQL, - JSON, - .NET - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-20T22:14:02.755878Z - }, - { - Content: Second Comment!, - CreatedDate: 2010-04-20T22:14:02.755878Z - } - ] - }, - { - Id: 4, - BlogId: 2, - Title: Couch Db, - Categories: - [ - NoSQL, - DocumentDB - ], - Tags: - [ - CouchDb, - NoSQL, - JSON - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-20T22:14:02.755878Z - } - ] - } - ] - */ - - Debug.WriteLine(recentComments.GetAll().Dump()); - /* Output: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-20T20:32:42.2970956Z - }, - { - Content: First Comment!, - CreatedDate: 2010-04-20T20:32:42.2970956Z - }, - { - Content: First Comment!, - CreatedDate: 2010-04-20T20:32:42.2970956Z - } - ] - */ - } - } - - [Test] - public void Show_a_TagCloud() - { - //Get strongly-typed clients - using (var redisBlogPosts = redisClient.GetTypedClient()) - { - var newIncomingBlogPosts = redisBlogPosts.GetAll(); - - foreach (var newBlogPost in newIncomingBlogPosts) - { - //For every tag in each new blog post, increment the number of times each Tag has occurred - newBlogPost.Tags.ForEach(x => - 
redisClient.IncrementItemInSortedSet("urn:TagCloud", x, 1)); - } - - //Show top 5 most popular tags with their scores - var tagCloud = redisClient.GetRangeWithScoresFromSortedSetDesc("urn:TagCloud", 0, 4); - Debug.WriteLine(tagCloud.Dump()); - } - /* Output: - [ - [ - NoSQL, - 4 - ], - [ - Scalability, - 2 - ], - [ - JSON, - 2 - ], - [ - Redis, - 1 - ], - [ - Raven, - 1 - ], - ] - */ - } - - [Test] - public void Show_all_Categories() - { - using (var redisBlogPosts = redisClient.GetTypedClient()) - { - var blogPosts = redisBlogPosts.GetAll(); - - foreach (var blogPost in blogPosts) - { - blogPost.Categories.ForEach(x => - redisClient.AddItemToSet("urn:Categories", x)); - } - - var uniqueCategories = redisClient.GetAllItemsFromSet("urn:Categories"); - Debug.WriteLine(uniqueCategories.Dump()); - /* Output: - [ - DocumentDB, - NoSQL, - Cluster, - Cache - ] - */ - } - } - - [Test] - public void Show_post_and_all_comments() - { - //There is nothing special required here as since comments are Key Value Objects - //they are stored and retrieved with the post - var postId = 1; - using (var redisBlogPosts = redisClient.GetTypedClient()) - { - var selectedBlogPost = redisBlogPosts.GetById(postId.ToString()); - - Debug.WriteLine(selectedBlogPost.Dump()); - /* Output: - { - Id: 1, - BlogId: 1, - Title: RavenDB, - Categories: - [ - NoSQL, - DocumentDB - ], - Tags: - [ - Raven, - NoSQL, - JSON, - .NET - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-20T21:26:31.9918236Z - }, - { - Content: Second Comment!, - CreatedDate: 2010-04-20T21:26:31.9918236Z - } - ] - } - */ - } - } - - [Test] - public void Add_comment_to_existing_post() - { - var postId = 1; - using (var redisBlogPosts = redisClient.GetTypedClient()) - { - var blogPost = redisBlogPosts.GetById(postId.ToString()); - blogPost.Comments.Add( - new BlogPostComment { Content = "Third Post!", CreatedDate = DateTime.UtcNow }); - redisBlogPosts.Store(blogPost); - - var refreshBlogPost = redisBlogPosts.GetById(postId.ToString()); - Debug.WriteLine(refreshBlogPost.Dump()); - /* Output: - { - Id: 1, - BlogId: 1, - Title: RavenDB, - Categories: - [ - NoSQL, - DocumentDB - ], - Tags: - [ - Raven, - NoSQL, - JSON, - .NET - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-20T21:32:39.9688707Z - }, - { - Content: Second Comment!, - CreatedDate: 2010-04-20T21:32:39.9688707Z - }, - { - Content: Third Post!, - CreatedDate: 2010-04-20T21:32:40.2688879Z - } - ] - } - */ - } - } - - [Test] - public void Show_all_Posts_for_the_DocumentDB_Category() - { - using (var redisBlogPosts = redisClient.GetTypedClient()) - { - var newIncomingBlogPosts = redisBlogPosts.GetAll(); - - foreach (var newBlogPost in newIncomingBlogPosts) - { - //For each post add it's Id into each of it's 'Cateogry > Posts' index - newBlogPost.Categories.ForEach(x => - redisClient.AddItemToSet("urn:Category:" + x, newBlogPost.Id.ToString())); - } - - //Retrieve all the post ids for the category you want to view - var documentDbPostIds = redisClient.GetAllItemsFromSet("urn:Category:DocumentDB"); - - //Make a batch call to retrieve all the posts containing the matching ids - //(i.e. 
the DocumentDB Category posts) - var documentDbPosts = redisBlogPosts.GetByIds(documentDbPostIds); - - Debug.WriteLine(documentDbPosts.Dump()); - /* Output: - [ - { - Id: 4, - BlogId: 2, - Title: Couch Db, - Categories: - [ - NoSQL, - DocumentDB - ], - Tags: - [ - CouchDb, - NoSQL, - JSON - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-20T21:38:24.6305842Z - } - ] - }, - { - Id: 1, - BlogId: 1, - Title: RavenDB, - Categories: - [ - NoSQL, - DocumentDB - ], - Tags: - [ - Raven, - NoSQL, - JSON, - .NET - ], - Comments: - [ - { - Content: First Comment!, - CreatedDate: 2010-04-20T21:38:24.6295842Z - }, - { - Content: Second Comment!, - CreatedDate: 2010-04-20T21:38:24.6295842Z - } - ] - } - ] - */ - } - } - - } + /// + /// A complete, self-contained example showing how to create a basic blog application using Redis. + /// + public class User + { + public User() + { + this.BlogIds = new List(); + } + + public long Id { get; set; } + public string Name { get; set; } + public List BlogIds { get; set; } + } + + public class Blog + { + public Blog() + { + this.Tags = new List(); + this.BlogPostIds = new List(); + } + + public long Id { get; set; } + public long UserId { get; set; } + public string UserName { get; set; } + public List Tags { get; set; } + public List BlogPostIds { get; set; } + } + + public class BlogPost + { + public BlogPost() + { + this.Categories = new List(); + this.Tags = new List(); + this.Comments = new List(); + } + + public long Id { get; set; } + public long BlogId { get; set; } + public string Title { get; set; } + public string Content { get; set; } + public List Categories { get; set; } + public List Tags { get; set; } + public List Comments { get; set; } + } + + public class BlogPostComment + { + public string Content { get; set; } + public DateTime CreatedDate { get; set; } + } + + + [TestFixture, Ignore("Integration"), Category("Integration")] + public class BlogPostExample + { + readonly RedisClient redis = new RedisClient(TestConfig.SingleHost); + + [SetUp] + public void OnBeforeEachTest() + { + redis.FlushAll(); + InsertTestData(); + } + + public void InsertTestData() + { + var redisUsers = redis.As(); + var redisBlogs = redis.As(); + var redisBlogPosts = redis.As(); + + var ayende = new User { Id = redisUsers.GetNextSequence(), Name = "Oren Eini" }; + var mythz = new User { Id = redisUsers.GetNextSequence(), Name = "Demis Bellot" }; + + var ayendeBlog = new Blog + { + Id = redisBlogs.GetNextSequence(), + UserId = ayende.Id, + UserName = ayende.Name, + Tags = new List { "Architecture", ".NET", "Databases" }, + }; + + var mythzBlog = new Blog + { + Id = redisBlogs.GetNextSequence(), + UserId = mythz.Id, + UserName = mythz.Name, + Tags = new List { "Architecture", ".NET", "Databases" }, + }; + + var blogPosts = new List + { + new BlogPost + { + Id = redisBlogPosts.GetNextSequence(), + BlogId = ayendeBlog.Id, + Title = "RavenDB", + Categories = new List { "NoSQL", "DocumentDB" }, + Tags = new List {"Raven", "NoSQL", "JSON", ".NET"} , + Comments = new List + { + new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,}, + new BlogPostComment { Content = "Second Comment!", CreatedDate = DateTime.UtcNow,}, + } + }, + new BlogPost + { + Id = redisBlogPosts.GetNextSequence(), + BlogId = mythzBlog.Id, + Title = "Redis", + Categories = new List { "NoSQL", "Cache" }, + Tags = new List {"Redis", "NoSQL", "Scalability", "Performance"}, + Comments = new List + { + new BlogPostComment { Content = "First Comment!", CreatedDate 
= DateTime.UtcNow,} + } + }, + new BlogPost + { + Id = redisBlogPosts.GetNextSequence(), + BlogId = ayendeBlog.Id, + Title = "Cassandra", + Categories = new List { "NoSQL", "Cluster" }, + Tags = new List {"Cassandra", "NoSQL", "Scalability", "Hashing"}, + Comments = new List + { + new BlogPostComment { Content = "First Comment!", CreatedDate = DateTime.UtcNow,} + } + }, + new BlogPost + { + Id = redisBlogPosts.GetNextSequence(), + BlogId = mythzBlog.Id, + Title = "Couch Db", + Categories = new List { "NoSQL", "DocumentDB" }, + Tags = new List {"CouchDb", "NoSQL", "JSON"}, + Comments = new List + { + new BlogPostComment {Content = "First Comment!", CreatedDate = DateTime.UtcNow,} + } + }, + }; + + ayende.BlogIds.Add(ayendeBlog.Id); + ayendeBlog.BlogPostIds.AddRange(blogPosts.Where(x => x.BlogId == ayendeBlog.Id).Map(x => x.Id)); + + mythz.BlogIds.Add(mythzBlog.Id); + mythzBlog.BlogPostIds.AddRange(blogPosts.Where(x => x.BlogId == mythzBlog.Id).Map(x => x.Id)); + + redisUsers.Store(ayende); + redisUsers.Store(mythz); + redisBlogs.StoreAll(new[] { ayendeBlog, mythzBlog }); + redisBlogPosts.StoreAll(blogPosts); + } + + [Test] + public void Show_a_list_of_blogs() + { + var redisBlogs = redis.As(); + var blogs = redisBlogs.GetAll(); + blogs.PrintDump(); + } + /* Output: + [ + { + Id: 1, + UserId: 1, + UserName: Ayende, + Tags: + [ + Architecture, + .NET, + Databases + ], + BlogPostIds: + [ + 1, + 3 + ] + }, + { + Id: 2, + UserId: 2, + UserName: Demis, + Tags: + [ + Architecture, + .NET, + Databases + ], + BlogPostIds: + [ + 2, + 4 + ] + } + ] + */ + + [Test] + public void Show_a_list_of_recent_posts_and_comments() + { + //Get strongly-typed clients + var redisBlogPosts = redis.As(); + var redisComments = redis.As(); + { + //To keep this example let's pretend this is a new list of blog posts + var newIncomingBlogPosts = redisBlogPosts.GetAll(); + + //Let's get back an IList wrapper around a Redis server-side List. 
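// The typed client's Lists[listId] indexer returns an IRedisList<T> facade over the Redis LIST
// stored at that key: Prepend() pushes the JSON-serialized post onto the head (LPUSH) and the
// Trim(0, 2) calls below issue an LTRIM, which is what turns these into rolling "latest 3" lists.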
+ var recentPosts = redisBlogPosts.Lists["urn:BlogPost:RecentPosts"]; + var recentComments = redisComments.Lists["urn:BlogPostComment:RecentComments"]; + + foreach (var newBlogPost in newIncomingBlogPosts) + { + //Prepend the new blog posts to the start of the 'RecentPosts' list + recentPosts.Prepend(newBlogPost); + + //Prepend all the new blog post comments to the start of the 'RecentComments' list + newBlogPost.Comments.ForEach(recentComments.Prepend); + } + + //Make this a Rolling list by only keep the latest 3 posts and comments + recentPosts.Trim(0, 2); + recentComments.Trim(0, 2); + + //Print out the last 3 posts: + recentPosts.GetAll().PrintDump(); + /* Output: + [ + { + Id: 2, + BlogId: 2, + Title: Redis, + Categories: + [ + NoSQL, + Cache + ], + Tags: + [ + Redis, + NoSQL, + Scalability, + Performance + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-20T22:14:02.755878Z + } + ] + }, + { + Id: 1, + BlogId: 1, + Title: RavenDB, + Categories: + [ + NoSQL, + DocumentDB + ], + Tags: + [ + Raven, + NoSQL, + JSON, + .NET + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-20T22:14:02.755878Z + }, + { + Content: Second Comment!, + CreatedDate: 2010-04-20T22:14:02.755878Z + } + ] + }, + { + Id: 4, + BlogId: 2, + Title: Couch Db, + Categories: + [ + NoSQL, + DocumentDB + ], + Tags: + [ + CouchDb, + NoSQL, + JSON + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-20T22:14:02.755878Z + } + ] + } + ] + */ + + recentComments.GetAll().PrintDump(); + /* Output: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-20T20:32:42.2970956Z + }, + { + Content: First Comment!, + CreatedDate: 2010-04-20T20:32:42.2970956Z + }, + { + Content: First Comment!, + CreatedDate: 2010-04-20T20:32:42.2970956Z + } + ] + */ + } + } + + [Test] + public void Show_a_TagCloud() + { + //Get strongly-typed clients + var redisBlogPosts = redis.As(); + var newIncomingBlogPosts = redisBlogPosts.GetAll(); + + foreach (var newBlogPost in newIncomingBlogPosts) + { + //For every tag in each new blog post, increment the number of times each Tag has occurred + newBlogPost.Tags.ForEach(x => + redis.IncrementItemInSortedSet("urn:TagCloud", x, 1)); + } + + //Show top 5 most popular tags with their scores + var tagCloud = redis.GetRangeWithScoresFromSortedSetDesc("urn:TagCloud", 0, 4); + tagCloud.PrintDump(); + /* Output: + [ + [ + NoSQL, + 4 + ], + [ + Scalability, + 2 + ], + [ + JSON, + 2 + ], + [ + Redis, + 1 + ], + [ + Raven, + 1 + ], + ] + */ + } + + [Test] + public void Show_all_Categories() + { + var redisBlogPosts = redis.As(); + var blogPosts = redisBlogPosts.GetAll(); + + foreach (var blogPost in blogPosts) + { + blogPost.Categories.ForEach(x => + redis.AddItemToSet("urn:Categories", x)); + } + + var uniqueCategories = redis.GetAllItemsFromSet("urn:Categories"); + uniqueCategories.PrintDump(); + /* Output: + [ + DocumentDB, + NoSQL, + Cluster, + Cache + ] + */ + } + + [Test] + public void Show_post_and_all_comments() + { + //There is nothing special required here as since comments are Key Value Objects + //they are stored and retrieved with the post + var postId = 1; + var redisBlogPosts = redis.As(); + var selectedBlogPost = redisBlogPosts.GetById(postId.ToString()); + + selectedBlogPost.PrintDump(); + /* Output: + { + Id: 1, + BlogId: 1, + Title: RavenDB, + Categories: + [ + NoSQL, + DocumentDB + ], + Tags: + [ + Raven, + NoSQL, + JSON, + .NET + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-20T21:26:31.9918236Z + }, + { + 
Content: Second Comment!, + CreatedDate: 2010-04-20T21:26:31.9918236Z + } + ] + } + */ + } + + [Test] + public void Add_comment_to_existing_post() + { + var postId = 1; + var redisBlogPosts = redis.As(); + var blogPost = redisBlogPosts.GetById(postId.ToString()); + blogPost.Comments.Add( + new BlogPostComment { Content = "Third Post!", CreatedDate = DateTime.UtcNow }); + redisBlogPosts.Store(blogPost); + + var refreshBlogPost = redisBlogPosts.GetById(postId.ToString()); + refreshBlogPost.PrintDump(); + /* Output: + { + Id: 1, + BlogId: 1, + Title: RavenDB, + Categories: + [ + NoSQL, + DocumentDB + ], + Tags: + [ + Raven, + NoSQL, + JSON, + .NET + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-20T21:32:39.9688707Z + }, + { + Content: Second Comment!, + CreatedDate: 2010-04-20T21:32:39.9688707Z + }, + { + Content: Third Post!, + CreatedDate: 2010-04-20T21:32:40.2688879Z + } + ] + } + */ + } + + [Test] + public void Show_all_Posts_for_the_DocumentDB_Category() + { + var redisBlogPosts = redis.As(); + var newIncomingBlogPosts = redisBlogPosts.GetAll(); + + foreach (var newBlogPost in newIncomingBlogPosts) + { + //For each post add it's Id into each of it's 'Cateogry > Posts' index + newBlogPost.Categories.ForEach(x => + redis.AddItemToSet("urn:Category:" + x, newBlogPost.Id.ToString())); + } + + //Retrieve all the post ids for the category you want to view + var documentDbPostIds = redis.GetAllItemsFromSet("urn:Category:DocumentDB"); + + //Make a batch call to retrieve all the posts containing the matching ids + //(i.e. the DocumentDB Category posts) + var documentDbPosts = redisBlogPosts.GetByIds(documentDbPostIds); + + documentDbPosts.PrintDump(); + /* Output: + [ + { + Id: 4, + BlogId: 2, + Title: Couch Db, + Categories: + [ + NoSQL, + DocumentDB + ], + Tags: + [ + CouchDb, + NoSQL, + JSON + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-20T21:38:24.6305842Z + } + ] + }, + { + Id: 1, + BlogId: 1, + Title: RavenDB, + Categories: + [ + NoSQL, + DocumentDB + ], + Tags: + [ + Raven, + NoSQL, + JSON, + .NET + ], + Comments: + [ + { + Content: First Comment!, + CreatedDate: 2010-04-20T21:38:24.6295842Z + }, + { + Content: Second Comment!, + CreatedDate: 2010-04-20T21:38:24.6295842Z + } + ] + } + ] + */ + } + + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Examples/ServiceStack_Redis_UseCase.cs b/tests/ServiceStack.Redis.Tests/Examples/ServiceStack_Redis_UseCase.cs new file mode 100644 index 00000000..92cf0e27 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Examples/ServiceStack_Redis_UseCase.cs @@ -0,0 +1,48 @@ +using System.Linq; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests.Examples +{ + [TestFixture, Ignore("Integration")] + public class ServiceStack_Redis_UseCase + { + public class Todo + { + public long Id { get; set; } + public string Content { get; set; } + public int Order { get; set; } + public bool Done { get; set; } + } + + [Test] + public void Can_Add_Update_and_Delete_Todo_item() + { + using (var redisManager = new PooledRedisClientManager()) + using (var redis = redisManager.GetClient()) + { + var redisTodos = redis.As(); + var todo = new Todo + { + Id = redisTodos.GetNextSequence(), + Content = "Learn Redis", + Order = 1, + }; + + redisTodos.Store(todo); + + Todo savedTodo = redisTodos.GetById(todo.Id); + savedTodo.Done = true; + redisTodos.Store(savedTodo); + + "Updated Todo:".Print(); + redisTodos.GetAll().ToList().PrintDump(); + + 
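// DeleteById removes the stored Todo entry and takes its id out of the typed client's id set,
// which is why the GetAll() that follows prints an empty list ("No more Todos").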
redisTodos.DeleteById(savedTodo.Id); + + "No more Todos:".Print(); + redisTodos.GetAll().ToList().PrintDump(); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Examples/SimpleExamples.cs b/tests/ServiceStack.Redis.Tests/Examples/SimpleExamples.cs index 8883907f..d225a6c7 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/SimpleExamples.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/SimpleExamples.cs @@ -8,114 +8,111 @@ namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit, Category("Integration")] - public class SimpleExamples - { - readonly RedisClient redisClient = new RedisClient(TestConfig.SingleHost); + [TestFixture, Ignore("Integration"), Category("Integration")] + public class SimpleExamples + { + readonly RedisClient redis = new RedisClient(TestConfig.SingleHost); - [SetUp] - public void OnBeforeEachTest() - { - redisClient.FlushAll(); - } + [SetUp] + public void OnBeforeEachTest() + { + redis.FlushAll(); + } - [Test] - public void Store_and_retrieve_users() - { - using (var redisUsers = redisClient.GetTypedClient()) - { - redisUsers.Store(new User { Id = redisUsers.GetNextSequence(), Name = "ayende" }); - redisUsers.Store(new User { Id = redisUsers.GetNextSequence(), Name = "mythz" }); + [Test] + public void Store_and_retrieve_users() + { + var redisUsers = redis.As(); + redisUsers.Store(new User { Id = redisUsers.GetNextSequence(), Name = "ayende" }); + redisUsers.Store(new User { Id = redisUsers.GetNextSequence(), Name = "mythz" }); - var allUsers = redisUsers.GetAll(); - Debug.WriteLine(allUsers.Dump()); - } - /*Output - [ - { - Id: 1, - Name: ayende, - BlogIds: [] - }, - { - Id: 2, - Name: mythz, - BlogIds: [] - } - ] - */ - } + var allUsers = redisUsers.GetAll(); + allUsers.PrintDump(); + /*Output + [ + { + Id: 1, + Name: ayende, + BlogIds: [] + }, + { + Id: 2, + Name: mythz, + BlogIds: [] + } + ] + */ + } - [Test] - public void Store_and_retrieve_some_blogs() - { - //Retrieve strongly-typed Redis clients that let's you natively persist POCO's - using (var redisUsers = redisClient.GetTypedClient()) - using (var redisBlogs = redisClient.GetTypedClient()) - { - //Create the user, getting a unique User Id from the User sequence. - var mythz = new User { Id = redisUsers.GetNextSequence(), Name = "Demis Bellot" }; + [Test] + public void Store_and_retrieve_some_blogs() + { + //Retrieve strongly-typed Redis clients that let's you natively persist POCO's + var redisUsers = redis.As(); + var redisBlogs = redis.As(); + //Create the user, getting a unique User Id from the User sequence. + var mythz = new User { Id = redisUsers.GetNextSequence(), Name = "Demis Bellot" }; - //create some blogs using unique Ids from the Blog sequence. Also adding references - var mythzBlogs = new List - { - new Blog - { - Id = redisBlogs.GetNextSequence(), - UserId = mythz.Id, - UserName = mythz.Name, - Tags = new List { "Architecture", ".NET", "Redis" }, - }, - new Blog - { - Id = redisBlogs.GetNextSequence(), - UserId = mythz.Id, - UserName = mythz.Name, - Tags = new List { "Music", "Twitter", "Life" }, - }, - }; - //Add the blog references - mythzBlogs.ForEach(x => mythz.BlogIds.Add(x.Id)); + //create some blogs using unique Ids from the Blog sequence. 
Also adding references + var mythzBlogs = new List + { + new Blog + { + Id = redisBlogs.GetNextSequence(), + UserId = mythz.Id, + UserName = mythz.Name, + Tags = new List { "Architecture", ".NET", "Redis" }, + }, + new Blog + { + Id = redisBlogs.GetNextSequence(), + UserId = mythz.Id, + UserName = mythz.Name, + Tags = new List { "Music", "Twitter", "Life" }, + }, + }; + //Add the blog references + mythzBlogs.ForEach(x => mythz.BlogIds.Add(x.Id)); - //Store the user and their blogs - redisUsers.Store(mythz); - redisBlogs.StoreAll(mythzBlogs); + //Store the user and their blogs + redisUsers.Store(mythz); + redisBlogs.StoreAll(mythzBlogs); - //retrieve all blogs - var blogs = redisBlogs.GetAll(); + //retrieve all blogs + var blogs = redisBlogs.GetAll(); - //Recursively print the values of the POCO (For T.Dump() Extension method see: http://www.servicestack.net/mythz_blog/?p=202) - Debug.WriteLine(blogs.Dump()); - } - /*Output - [ - { - Id: 1, - UserId: 1, - UserName: Demis Bellot, - Tags: - [ - Architecture, - .NET, - Redis - ], - BlogPostIds: [] - }, - { - Id: 2, - UserId: 1, - UserName: Demis Bellot, - Tags: - [ - Music, - Twitter, - Life - ], - BlogPostIds: [] - } - ] - */ - } + //Recursively print the values of the POCO (For T.Dump() Extension method see: http://mono.servicestack.net/mythz_blog/?p=202) + blogs.PrintDump(); - } + /*Output + [ + { + Id: 1, + UserId: 1, + UserName: Demis Bellot, + Tags: + [ + Architecture, + .NET, + Redis + ], + BlogPostIds: [] + }, + { + Id: 2, + UserId: 1, + UserName: Demis Bellot, + Tags: + [ + Music, + Twitter, + Life + ], + BlogPostIds: [] + } + ] + */ + } + + } } diff --git a/tests/ServiceStack.Redis.Tests/Examples/SimpleLocks.cs b/tests/ServiceStack.Redis.Tests/Examples/SimpleLocks.cs index 909bf5fe..4abfc881 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/SimpleLocks.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/SimpleLocks.cs @@ -1,101 +1,99 @@ using System; using System.Collections.Generic; using System.Diagnostics; -using System.Linq; -using System.Text; using System.Threading; +using System.Threading.Tasks; using NUnit.Framework; -using ServiceStack.Common.Extensions; namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit, Category("Integration")] - public class SimpleLocks - { - [TestFixtureSetUp] - public void OnTestFixtureSetUp() - { - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - redisClient.FlushAll(); - } - } - - [Test] - public void Use_multiple_redis_clients_to_safely_execute() - { - //The number of concurrent clients to run - const int noOfClients = 5; - var asyncResults = new List(noOfClients); - for (var i = 1; i <= noOfClients; i++) - { - var clientNo = i; - var actionFn = (Action)delegate - { - var redisClient = new RedisClient(TestConfig.SingleHost); - using (redisClient.AcquireLock("testlock")) - { - Debug.WriteLine(String.Format("client {0} acquired lock", clientNo)); - var counter = redisClient.Get("atomic-counter"); - - //Add an artificial delay to demonstrate locking behaviour - Thread.Sleep(100); - - redisClient.Set("atomic-counter", counter + 1); - Debug.WriteLine(String.Format("client {0} released lock", clientNo)); - } - }; - - //Asynchronously invoke the above delegate in a background thread - asyncResults.Add(actionFn.BeginInvoke(null, null)); - } - - //Wait at most 1 second for all the threads to complete - asyncResults.WaitAll(TimeSpan.FromSeconds(1)); - - //Print out the 'atomic-counter' result - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - var 
counter = redisClient.Get("atomic-counter"); - Debug.WriteLine(String.Format("atomic-counter after 1sec: {0}", counter)); - } - } - - [Test] - public void Acquiring_lock_with_timeout() - { - var redisClient = new RedisClient(TestConfig.SingleHost); - - //Initialize and set counter to '1' - redisClient.IncrementValue("atomic-counter"); - - //Acquire lock and never release it - redisClient.AcquireLock("testlock"); - - var waitFor = TimeSpan.FromSeconds(2); - var now = DateTime.Now; - - try - { - using (var newClient = new RedisClient(TestConfig.SingleHost)) - { - //Attempt to acquire a lock with a 2 second timeout - using (newClient.AcquireLock("testlock", waitFor)) - { - //If lock was acquired this would be incremented to '2' - redisClient.IncrementValue("atomic-counter"); - } - } - } - catch (TimeoutException tex) - { - var timeTaken = DateTime.Now - now; - Debug.WriteLine(String.Format("After '{0}', Received TimeoutException: '{1}'", timeTaken, tex.Message)); - - var counter = redisClient.Get("atomic-counter"); - Debug.WriteLine(String.Format("atomic-counter remains at '{0}'", counter)); - } - } + [TestFixture, Ignore("Integration"), Category("Integration")] + public class SimpleLocks + { + [SetUp] + public void OnTestFixtureSetUp() + { + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + redisClient.FlushAll(); + } + } + + [Test] + public void Use_multiple_redis_clients_to_safely_execute() + { + //The number of concurrent clients to run + const int noOfClients = 5; + var asyncResults = new List(noOfClients); + for (var i = 1; i <= noOfClients; i++) + { + var clientNo = i; + var actionFn = (Action)delegate + { + var redisClient = new RedisClient(TestConfig.SingleHost); + using (redisClient.AcquireLock("testlock")) + { + Debug.WriteLine(String.Format("client {0} acquired lock", clientNo)); + var counter = redisClient.Get("atomic-counter"); + + //Add an artificial delay to demonstrate locking behaviour + Thread.Sleep(100); + + redisClient.Set("atomic-counter", counter + 1); + Debug.WriteLine(String.Format("client {0} released lock", clientNo)); + } + }; + + //Asynchronously invoke the above delegate in a background thread + asyncResults.Add(actionFn.BeginInvoke(null, null)); + } + + //Wait at most 1 second for all the threads to complete + asyncResults.WaitAll(TimeSpan.FromSeconds(1)); + + //Print out the 'atomic-counter' result + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + var counter = redisClient.Get("atomic-counter"); + Debug.WriteLine(String.Format("atomic-counter after 1sec: {0}", counter)); + } + } + + [Test] + public void Acquiring_lock_with_timeout() + { + var redisClient = new RedisClient(TestConfig.SingleHost); + + //Initialize and set counter to '1' + redisClient.IncrementValue("atomic-counter"); + + //Acquire lock and never release it + redisClient.AcquireLock("testlock"); + + var waitFor = TimeSpan.FromSeconds(2); + var now = DateTime.Now; + + try + { + using (var newClient = new RedisClient(TestConfig.SingleHost)) + { + //Attempt to acquire a lock with a 2 second timeout + using (newClient.AcquireLock("testlock", waitFor)) + { + //If lock was acquired this would be incremented to '2' + redisClient.IncrementValue("atomic-counter"); + } + } + } + catch (TimeoutException tex) + { + var timeTaken = DateTime.Now - now; + Debug.WriteLine(String.Format("After '{0}', Received TimeoutException: '{1}'", timeTaken, tex.Message)); + + var counter = redisClient.Get("atomic-counter"); + Debug.WriteLine(String.Format("atomic-counter remains at 
'{0}'", counter)); + } + } [Test] public void SimulateLockTimeout() @@ -103,15 +101,52 @@ public void SimulateLockTimeout() var redisClient = new RedisClient(TestConfig.SingleHost); var waitFor = TimeSpan.FromMilliseconds(20); - var loc = redisClient.AcquireLock("testlock",waitFor); - Thread.Sleep(40); //should have lock expire - using(var newloc = redisClient.AcquireLock("testlock", waitFor)) + var loc = redisClient.AcquireLock("testlock", waitFor); + Thread.Sleep(100); //should have lock expire + using (var newloc = redisClient.AcquireLock("testlock", waitFor)) + { + + } + } + + [Test] + public void AcquireLock_using_Tasks() + { + const int noOfClients = 4; + var tasks = new Task[noOfClients]; + for (var i = 0; i < noOfClients; i++) { - + Thread.Sleep(2000); + tasks[i] = Task.Factory.StartNew((object clientNo) => + { + try + { + Console.WriteLine("About to process " + clientNo); + //var redisClient = new RedisClient("xxxx.redis.cache.windows.net", 6379, "xxxx"); + var redisClient = new RedisClient(TestConfig.SingleHost, 6379); + + using (redisClient.AcquireLock("testlock1", TimeSpan.FromMinutes(3))) + { + Console.WriteLine("client {0} acquired lock", (int)clientNo); + var counter = redisClient.Get("atomic-counter"); + + //Add an artificial delay to demonstrate locking behaviour + Thread.Sleep(100); + + redisClient.Set("atomic-counter", counter + 1); + Console.WriteLine("client {0} released lock", (int)clientNo); + } + } + catch (Exception e) + { + Console.WriteLine(e.Message); + } + + }, i + 1); } } - } + } } diff --git a/tests/ServiceStack.Redis.Tests/Examples/SimplePubSub.cs b/tests/ServiceStack.Redis.Tests/Examples/SimplePubSub.cs index caef5a64..207f8d7c 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/SimplePubSub.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/SimplePubSub.cs @@ -2,76 +2,76 @@ using System.Diagnostics; using System.Threading; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit, Category("Integration")] - public class SimplePubSub - { + [TestFixture, Ignore("Integration"), Category("Integration")] + public class SimplePubSub + { const string ChannelName = "SimplePubSubCHANNEL"; - const string MessagePrefix = "MESSAGE "; - const int PublishMessageCount = 5; - - [TestFixtureSetUp] - public void TestFixtureSetUp() - { - using (var redis = new RedisClient(TestConfig.SingleHost)) - { - redis.FlushAll(); - } - } - - [Test] - public void Publish_and_receive_5_messages() - { - var messagesReceived = 0; - - using (var redisConsumer = new RedisClient(TestConfig.SingleHost)) - using (var subscription = redisConsumer.CreateSubscription()) - { - subscription.OnSubscribe = channel => - { - Debug.WriteLine(String.Format("Subscribed to '{0}'", channel)); - }; - subscription.OnUnSubscribe = channel => - { - Debug.WriteLine(String.Format("UnSubscribed from '{0}'", channel)); - }; - subscription.OnMessage = (channel, msg) => - { - Debug.WriteLine(String.Format("Received '{0}' from channel '{1}'", msg, channel)); - - //As soon as we've received all 5 messages, disconnect by unsubscribing to all channels - if (++messagesReceived == PublishMessageCount) - { - subscription.UnSubscribeFromAllChannels(); - } - }; - - ThreadPool.QueueUserWorkItem(x => - { - Thread.Sleep(200); - Debug.WriteLine("Begin publishing messages..."); - - using (var redisPublisher = new RedisClient(TestConfig.SingleHost)) - { - for (var i = 1; i <= PublishMessageCount; i++) - { - var message = 
MessagePrefix + i; - Debug.WriteLine(String.Format("Publishing '{0}' to '{1}'", message, ChannelName)); - redisPublisher.PublishMessage(ChannelName, message); - } - } - }); - - Debug.WriteLine(String.Format("Started Listening On '{0}'", ChannelName)); - subscription.SubscribeToChannels(ChannelName); //blocking - } - - Debug.WriteLine("EOF"); - - /*Output: + const string MessagePrefix = "MESSAGE "; + const int PublishMessageCount = 5; + + [OneTimeSetUp] + public void OneTimeSetUp() + { + using (var redis = new RedisClient(TestConfig.SingleHost)) + { + redis.FlushAll(); + } + } + + [Test] + public void Publish_and_receive_5_messages() + { + var messagesReceived = 0; + + using (var redisConsumer = new RedisClient(TestConfig.SingleHost)) + using (var subscription = redisConsumer.CreateSubscription()) + { + subscription.OnSubscribe = channel => + { + Debug.WriteLine(String.Format("Subscribed to '{0}'", channel)); + }; + subscription.OnUnSubscribe = channel => + { + Debug.WriteLine(String.Format("UnSubscribed from '{0}'", channel)); + }; + subscription.OnMessage = (channel, msg) => + { + Debug.WriteLine(String.Format("Received '{0}' from channel '{1}'", msg, channel)); + + //As soon as we've received all 5 messages, disconnect by unsubscribing to all channels + if (++messagesReceived == PublishMessageCount) + { + subscription.UnSubscribeFromAllChannels(); + } + }; + + ThreadPool.QueueUserWorkItem(x => + { + Thread.Sleep(200); + Debug.WriteLine("Begin publishing messages..."); + + using (var redisPublisher = new RedisClient(TestConfig.SingleHost)) + { + for (var i = 1; i <= PublishMessageCount; i++) + { + var message = MessagePrefix + i; + Debug.WriteLine(String.Format("Publishing '{0}' to '{1}'", message, ChannelName)); + redisPublisher.PublishMessage(ChannelName, message); + } + } + }); + + Debug.WriteLine(String.Format("Started Listening On '{0}'", ChannelName)); + subscription.SubscribeToChannels(ChannelName); //blocking + } + + Debug.WriteLine("EOF"); + + /*Output: Started Listening On 'CHANNEL' Subscribed to 'CHANNEL' Begin publishing messages... @@ -88,65 +88,65 @@ Begin publishing messages... 
UnSubscribed from 'CHANNEL' EOF */ - } - - [Test] - public void Publish_5_messages_to_3_clients() - { - const int noOfClients = 3; - - for (var i = 1; i <= noOfClients; i++) - { - var clientNo = i; - ThreadPool.QueueUserWorkItem(x => - { - using (var redisConsumer = new RedisClient(TestConfig.SingleHost)) - using (var subscription = redisConsumer.CreateSubscription()) - { - var messagesReceived = 0; - subscription.OnSubscribe = channel => - { - Debug.WriteLine(String.Format("Client #{0} Subscribed to '{1}'", clientNo, channel)); - }; - subscription.OnUnSubscribe = channel => - { - Debug.WriteLine(String.Format("Client #{0} UnSubscribed from '{1}'", clientNo, channel)); - }; - subscription.OnMessage = (channel, msg) => - { - Debug.WriteLine(String.Format("Client #{0} Received '{1}' from channel '{2}'", - clientNo, msg, channel)); - - if (++messagesReceived == PublishMessageCount) - { - subscription.UnSubscribeFromAllChannels(); - } - }; - - Debug.WriteLine(String.Format("Client #{0} started Listening On '{1}'", clientNo, ChannelName)); - subscription.SubscribeToChannels(ChannelName); //blocking - } - - Debug.WriteLine(String.Format("Client #{0} EOF", clientNo)); - }); - } - - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - Thread.Sleep(500); - Debug.WriteLine("Begin publishing messages..."); - - for (var i = 1; i <= PublishMessageCount; i++) - { - var message = MessagePrefix + i; - Debug.WriteLine(String.Format("Publishing '{0}' to '{1}'", message, ChannelName)); - redisClient.PublishMessage(ChannelName, message); - } - } - - Thread.Sleep(500); - - /*Output: + } + + [Test] + public void Publish_5_messages_to_3_clients() + { + const int noOfClients = 3; + + for (var i = 1; i <= noOfClients; i++) + { + var clientNo = i; + ThreadPool.QueueUserWorkItem(x => + { + using (var redisConsumer = new RedisClient(TestConfig.SingleHost)) + using (var subscription = redisConsumer.CreateSubscription()) + { + var messagesReceived = 0; + subscription.OnSubscribe = channel => + { + Debug.WriteLine(String.Format("Client #{0} Subscribed to '{1}'", clientNo, channel)); + }; + subscription.OnUnSubscribe = channel => + { + Debug.WriteLine(String.Format("Client #{0} UnSubscribed from '{1}'", clientNo, channel)); + }; + subscription.OnMessage = (channel, msg) => + { + Debug.WriteLine(String.Format("Client #{0} Received '{1}' from channel '{2}'", + clientNo, msg, channel)); + + if (++messagesReceived == PublishMessageCount) + { + subscription.UnSubscribeFromAllChannels(); + } + }; + + Debug.WriteLine(String.Format("Client #{0} started Listening On '{1}'", clientNo, ChannelName)); + subscription.SubscribeToChannels(ChannelName); //blocking + } + + Debug.WriteLine(String.Format("Client #{0} EOF", clientNo)); + }); + } + + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + Thread.Sleep(500); + Debug.WriteLine("Begin publishing messages..."); + + for (var i = 1; i <= PublishMessageCount; i++) + { + var message = MessagePrefix + i; + Debug.WriteLine(String.Format("Publishing '{0}' to '{1}'", message, ChannelName)); + redisClient.PublishMessage(ChannelName, message); + } + } + + Thread.Sleep(500); + + /*Output: Client #1 started Listening On 'CHANNEL' Client #2 started Listening On 'CHANNEL' Client #1 Subscribed to 'CHANNEL' @@ -181,6 +181,6 @@ Begin publishing messages... 
Client #2 UnSubscribed from 'CHANNEL' Client #2 EOF */ - } - } + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Examples/TestData.cs b/tests/ServiceStack.Redis.Tests/Examples/TestData.cs new file mode 100644 index 00000000..9638709f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Examples/TestData.cs @@ -0,0 +1,79 @@ +using System; +using System.Collections.Generic; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests.Examples +{ + [TestFixture, Ignore("Integration")] + public class TestData + : RedisClientTestsBase + { + public class Article + { + public int Id { get; set; } + public string Title { get; set; } + public DateTime ModifiedDate { get; set; } + } + + [Test] + public void Create_test_data_for_all_types() + { + AddLists(); + AddSets(); + AddSortedSets(); + AddHashes(); + } + + private void AddLists() + { + var storeMembers = new List { "one", "two", "three", "four" }; + storeMembers.ForEach(x => Redis.AddItemToList("testlist", x)); + } + + private void AddSets() + { + var storeMembers = new List { "one", "two", "three", "four" }; + storeMembers.ForEach(x => Redis.AddItemToSet("testset", x)); + } + + private void AddHashes() + { + var stringMap = new Dictionary { + {"one","a"}, {"two","b"}, {"three","c"}, {"four","d"} + }; + var stringIntMap = new Dictionary { + {"one",1}, {"two",2}, {"three",3}, {"four",4} + }; + + stringMap.Each(x => Redis.SetEntryInHash("testhash", x.Key, x.Value)); + + var hash = Redis.Hashes["testhash"]; + stringIntMap.Each(x => hash.Add(x.Key, x.Value.ToString())); + } + + private void AddSortedSets() + { + var i = 0; + var storeMembers = new List { "one", "two", "three", "four" }; + storeMembers.ForEach(x => Redis.AddItemToSortedSet("testzset", x, i++)); + + var redisArticles = Redis.As
(); + + var articles = new[] + { + new Article {Id = 1, Title = "Article 1", ModifiedDate = new DateTime(2015, 01, 02)}, + new Article {Id = 2, Title = "Article 2", ModifiedDate = new DateTime(2015, 01, 01)}, + new Article {Id = 3, Title = "Article 3", ModifiedDate = new DateTime(2015, 01, 03)}, + }; + + redisArticles.StoreAll(articles); + + const string LatestArticlesSet = "urn:Article:modified"; + + foreach (var article in articles) + { + Redis.AddItemToSortedSet(LatestArticlesSet, article.Id.ToString(), article.ModifiedDate.Ticks); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Examples/TodoApp.cs b/tests/ServiceStack.Redis.Tests/Examples/TodoApp.cs index 02ca9275..393ddc3e 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/TodoApp.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/TodoApp.cs @@ -2,51 +2,51 @@ namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit, Category("Integration")] - public class TodoApp - { - [SetUp] - public void SetUp() - { - new RedisClient().FlushAll(); - } - - public class Todo - { - public long Id { get; set; } - public string Content { get; set; } - public int Order { get; set; } - public bool Done { get; set; } - } - - [Test] - public void Crud_TODO_App() - { - //Thread-safe client factory - var redisManager = new PooledRedisClientManager("localhost:6379"); - - redisManager.ExecAs(redisTodos => { - var todo = new Todo { - Id = redisTodos.GetNextSequence(), - Content = "Learn Redis", - Order = 1, - }; - - redisTodos.Store(todo); - - Todo savedTodo = redisTodos.GetById(todo.Id); - savedTodo.Done = true; - - redisTodos.Store(savedTodo); - - redisTodos.DeleteById(savedTodo.Id); - - var allTodos = redisTodos.GetAll(); - - Assert.That(allTodos.Count, Is.EqualTo(0)); - }); - } - - - } + [TestFixture, Ignore("Integration"), Category("Integration")] + public class TodoApp + { + [SetUp] + public void SetUp() + { + new RedisClient().FlushAll(); + } + + public class Todo + { + public long Id { get; set; } + public string Content { get; set; } + public int Order { get; set; } + public bool Done { get; set; } + } + + [Test] + public void Crud_TODO_App() + { + //Thread-safe client factory + var redisManager = new PooledRedisClientManager(TestConfig.SingleHostConnectionString); + + redisManager.ExecAs(redisTodos => + { + var todo = new Todo + { + Id = redisTodos.GetNextSequence(), + Content = "Learn Redis", + Order = 1, + }; + + redisTodos.Store(todo); + + Todo savedTodo = redisTodos.GetById(todo.Id); + savedTodo.Done = true; + + redisTodos.Store(savedTodo); + + redisTodos.DeleteById(savedTodo.Id); + + var allTodos = redisTodos.GetAll(); + + Assert.That(allTodos.Count, Is.EqualTo(0)); + }); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.Async.cs new file mode 100644 index 00000000..1889d193 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.Async.cs @@ -0,0 +1,250 @@ +using System.Collections.Generic; +using NUnit.Framework; +using ServiceStack.Common; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using System.Linq; +using System.Threading.Tasks; +using System; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Async")] + public abstract class RedisClientHashTestsBaseAsync + { + private const string HashId = "testhash"; + + protected abstract IModelFactory Factory { get; } + + private 
IRedisClientAsync client; + private IRedisTypedClientAsync redis; + private IRedisHashAsync Hash; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + await client.FlushAllAsync(); + + redis = client.As(); + + Hash = redis.GetHash(HashId); + } + + private Dictionary CreateMap() + { + var listValues = Factory.CreateList(); + var map = new Dictionary(); + listValues.ForEach(x => map[x.ToString()] = x); + return map; + } + + private Dictionary CreateMap2() + { + var listValues = Factory.CreateList2(); + var map = new Dictionary(); + listValues.ForEach(x => map[x.ToString()] = x); + return map; + } + + [Test] + public async Task Can_SetItemInHash_and_GetAllFromHash() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_RemoveFromHash() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var firstKey = mapValues.First().Key; + + await redis.RemoveEntryFromHashAsync(Hash, firstKey); + + mapValues.Remove(firstKey); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_GetItemFromHash() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var firstKey = mapValues.First().Key; + + var hashValue = await redis.GetValueFromHashAsync(Hash, firstKey); + + Assert.That(hashValue, Is.EqualTo(mapValues[firstKey])); + } + + [Test] + public async Task Can_GetHashCount() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var hashCount = await redis.GetHashCountAsync(Hash); + + Assert.That(hashCount, Is.EqualTo(mapValues.Count)); + } + + [Test] + public async Task Does_HashContainsKey() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var existingMember = mapValues.First().Key; + var nonExistingMember = existingMember + "notexists"; + + Assert.That(await redis.HashContainsEntryAsync(Hash, existingMember), Is.True); + Assert.That(await redis.HashContainsEntryAsync(Hash, nonExistingMember), Is.False); + } + + [Test] + public async Task Can_GetHashKeys() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var expectedKeys = mapValues.Map(x => x.Key); + + var hashKeys = await redis.GetHashKeysAsync(Hash); + + Assert.That(hashKeys, Is.EquivalentTo(expectedKeys)); + } + + [Test] + public async Task Can_GetHashValues() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var expectedValues = mapValues.Map(x => x.Value); + + var hashValues = await redis.GetHashValuesAsync(Hash); + + Assert.That(hashValues, Is.EquivalentTo(expectedValues)); + } + + [Test] + public async Task Can_enumerate_small_IDictionary_Hash() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var members = new List(); + await foreach (var item in 
redis.GetHash(HashId)) + { + Assert.That(mapValues.ContainsKey(item.Key), Is.True); + members.Add(item.Key); + } + Assert.That(members.Count, Is.EqualTo(mapValues.Count)); + } + + [Test] + public async Task Can_Add_to_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + await mapValues.ForEachAsync((k, v) => hash.AddAsync(k, v)); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_Clear_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + await mapValues.ForEachAsync((k, v) => hash.AddAsync(k, v)); + + Assert.That(await hash.CountAsync(), Is.EqualTo(mapValues.Count)); + + await hash.ClearAsync(); + + Assert.That(await hash.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + await mapValues.ForEachAsync((k, v) => hash.AddAsync(k, v)); + + var existingMember = mapValues.First().Key; + var nonExistingMember = existingMember + "notexists"; + + Assert.That(await hash.ContainsKeyAsync(existingMember), Is.True); + Assert.That(await hash.ContainsKeyAsync(nonExistingMember), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + await mapValues.ForEachAsync((k, v) => hash.AddAsync(k, v)); + + var firstKey = mapValues.First().Key; + mapValues.Remove(firstKey); + await hash.RemoveAsync(firstKey); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_SetItemInHashIfNotExists() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var existingMember = mapValues.First().Key; + var nonExistingMember = existingMember + "notexists"; + + var lastValue = mapValues.Last().Value; + + await redis.SetEntryInHashIfNotExistsAsync(Hash, existingMember, lastValue); + await redis.SetEntryInHashIfNotExistsAsync(Hash, nonExistingMember, lastValue); + + mapValues[nonExistingMember] = lastValue; + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_SetRangeInHash() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var newMapValues = CreateMap2(); + + await redis.SetRangeInHashAsync(Hash, newMapValues); + + newMapValues.Each(x => mapValues[x.Key] = x.Value); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.cs index f93a4aa4..0f6627ba 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.cs @@ -1,258 +1,258 @@ using System.Collections.Generic; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; using ServiceStack.Common.Tests.Models; using ServiceStack.Redis.Generic; using System.Linq; namespace ServiceStack.Redis.Tests.Generic { - [TestFixture] - public abstract class 
RedisClientHashTestsBase - { - private const string HashId = "testhash"; - - protected abstract IModelFactory Factory { get; } - - private RedisClient client; - private IRedisTypedClient redis; - private IRedisHash Hash; - - [SetUp] - public void SetUp() - { - if (client != null) - { - client.Dispose(); - client = null; - } - client = new RedisClient(TestConfig.SingleHost); - client.FlushAll(); - - redis = client.GetTypedClient(); - - Hash = redis.GetHash(HashId); - } + [TestFixture] + public abstract class RedisClientHashTestsBase + { + private const string HashId = "testhash"; + + protected abstract IModelFactory Factory { get; } + + private RedisClient client; + private IRedisTypedClient redis; + private IRedisHash Hash; + + [SetUp] + public void SetUp() + { + if (client != null) + { + client.Dispose(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + client.FlushAll(); + + redis = client.As(); + + Hash = redis.GetHash(HashId); + } - private Dictionary CreateMap() - { - var listValues = Factory.CreateList(); - var map = new Dictionary(); - listValues.ForEach(x => map[x.ToString()] = x); - return map; - } + private Dictionary CreateMap() + { + var listValues = Factory.CreateList(); + var map = new Dictionary(); + listValues.ForEach(x => map[x.ToString()] = x); + return map; + } - private Dictionary CreateMap2() - { - var listValues = Factory.CreateList2(); - var map = new Dictionary(); - listValues.ForEach(x => map[x.ToString()] = x); - return map; - } - - [Test] - public void Can_SetItemInHash_and_GetAllFromHash() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => redis.SetEntryInHash(Hash, k, v)); - - var members = redis.GetAllEntriesFromHash(Hash); - Assert.That(members, Is.EquivalentTo(mapValues)); - } - - [Test] - public void Can_RemoveFromHash() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => redis.SetEntryInHash(Hash, k, v)); - - var firstKey = mapValues.First().Key; - - redis.RemoveEntryFromHash(Hash, firstKey); - - mapValues.Remove(firstKey); - - var members = redis.GetAllEntriesFromHash(Hash); - Assert.That(members, Is.EquivalentTo(mapValues)); - } - - [Test] - public void Can_GetItemFromHash() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => redis.SetEntryInHash(Hash, k, v)); - - var firstKey = mapValues.First().Key; - - var hashValue = redis.GetValueFromHash(Hash, firstKey); - - Assert.That(hashValue, Is.EqualTo(mapValues[firstKey])); - } - - [Test] - public void Can_GetHashCount() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => redis.SetEntryInHash(Hash, k, v)); - - var hashCount = redis.GetHashCount(Hash); - - Assert.That(hashCount, Is.EqualTo(mapValues.Count)); - } - - [Test] - public void Does_HashContainsKey() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => redis.SetEntryInHash(Hash, k, v)); - - var existingMember = mapValues.First().Key; - var nonExistingMember = existingMember + "notexists"; - - Assert.That(redis.HashContainsEntry(Hash, existingMember), Is.True); - Assert.That(redis.HashContainsEntry(Hash, nonExistingMember), Is.False); - } - - [Test] - public void Can_GetHashKeys() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => redis.SetEntryInHash(Hash, k, v)); - - var expectedKeys = mapValues.ConvertAll(x => x.Key); - - var hashKeys = redis.GetHashKeys(Hash); - - Assert.That(hashKeys, Is.EquivalentTo(expectedKeys)); - } - - [Test] - public void Can_GetHashValues() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => 
redis.SetEntryInHash(Hash, k, v)); - - var expectedValues = mapValues.ConvertAll(x => x.Value); - - var hashValues = redis.GetHashValues(Hash); - - Assert.That(hashValues, Is.EquivalentTo(expectedValues)); - } - - [Test] - public void Can_enumerate_small_IDictionary_Hash() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => redis.SetEntryInHash(Hash, k, v)); - - var members = new List(); - foreach (var item in redis.GetHash(HashId)) - { - Assert.That(mapValues.ContainsKey(item.Key), Is.True); - members.Add(item.Key); - } - Assert.That(members.Count, Is.EqualTo(mapValues.Count)); - } - - [Test] - public void Can_Add_to_IDictionary_Hash() - { - var hash = redis.GetHash(HashId); - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => hash.Add(k, v)); - - var members = redis.GetAllEntriesFromHash(Hash); - Assert.That(members, Is.EquivalentTo(mapValues)); - } + private Dictionary CreateMap2() + { + var listValues = Factory.CreateList2(); + var map = new Dictionary(); + listValues.ForEach(x => map[x.ToString()] = x); + return map; + } + + [Test] + public void Can_SetItemInHash_and_GetAllFromHash() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var members = redis.GetAllEntriesFromHash(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public void Can_RemoveFromHash() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var firstKey = mapValues.First().Key; + + redis.RemoveEntryFromHash(Hash, firstKey); + + mapValues.Remove(firstKey); + + var members = redis.GetAllEntriesFromHash(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public void Can_GetItemFromHash() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var firstKey = mapValues.First().Key; + + var hashValue = redis.GetValueFromHash(Hash, firstKey); + + Assert.That(hashValue, Is.EqualTo(mapValues[firstKey])); + } + + [Test] + public void Can_GetHashCount() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var hashCount = redis.GetHashCount(Hash); + + Assert.That(hashCount, Is.EqualTo(mapValues.Count)); + } + + [Test] + public void Does_HashContainsKey() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var existingMember = mapValues.First().Key; + var nonExistingMember = existingMember + "notexists"; + + Assert.That(redis.HashContainsEntry(Hash, existingMember), Is.True); + Assert.That(redis.HashContainsEntry(Hash, nonExistingMember), Is.False); + } + + [Test] + public void Can_GetHashKeys() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var expectedKeys = mapValues.Map(x => x.Key); + + var hashKeys = redis.GetHashKeys(Hash); + + Assert.That(hashKeys, Is.EquivalentTo(expectedKeys)); + } + + [Test] + public void Can_GetHashValues() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var expectedValues = mapValues.Map(x => x.Value); + + var hashValues = redis.GetHashValues(Hash); + + Assert.That(hashValues, Is.EquivalentTo(expectedValues)); + } + + [Test] + public void Can_enumerate_small_IDictionary_Hash() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var members = new List(); + foreach (var item in redis.GetHash(HashId)) + { + 
Assert.That(mapValues.ContainsKey(item.Key), Is.True); + members.Add(item.Key); + } + Assert.That(members.Count, Is.EqualTo(mapValues.Count)); + } - [Test] - public void Can_Clear_IDictionary_Hash() - { - var hash = redis.GetHash(HashId); - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => hash.Add(k, v)); - - Assert.That(hash.Count, Is.EqualTo(mapValues.Count)); - - hash.Clear(); - - Assert.That(hash.Count, Is.EqualTo(0)); - } - - [Test] - public void Can_Test_Contains_in_IDictionary_Hash() - { - var hash = redis.GetHash(HashId); - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => hash.Add(k, v)); - - var existingMember = mapValues.First().Key; - var nonExistingMember = existingMember + "notexists"; - - Assert.That(hash.ContainsKey(existingMember), Is.True); - Assert.That(hash.ContainsKey(nonExistingMember), Is.False); - } - - [Test] - public void Can_Remove_value_from_IDictionary_Hash() - { - var hash = redis.GetHash(HashId); - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => hash.Add(k, v)); - - var firstKey = mapValues.First().Key; - mapValues.Remove(firstKey); - hash.Remove(firstKey); - - var members = redis.GetAllEntriesFromHash(Hash); - Assert.That(members, Is.EquivalentTo(mapValues)); - } - - private static Dictionary ToStringMap(Dictionary stringIntMap) - { - var map = new Dictionary(); - foreach (var kvp in stringIntMap) - { - map[kvp.Key] = kvp.Value.ToString(); - } - return map; - } - - [Test] - public void Can_SetItemInHashIfNotExists() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => redis.SetEntryInHash(Hash, k, v)); - - var existingMember = mapValues.First().Key; - var nonExistingMember = existingMember + "notexists"; - - var lastValue = mapValues.Last().Value; - - redis.SetEntryInHashIfNotExists(Hash, existingMember, lastValue); - redis.SetEntryInHashIfNotExists(Hash, nonExistingMember, lastValue); - - mapValues[nonExistingMember] = lastValue; - - var members = redis.GetAllEntriesFromHash(Hash); - Assert.That(members, Is.EquivalentTo(mapValues)); - } - - [Test] - public void Can_SetRangeInHash() - { - var mapValues = CreateMap(); - mapValues.ForEach((k,v) => redis.SetEntryInHash(Hash, k, v)); - - var newMapValues = CreateMap2(); - - redis.SetRangeInHash(Hash, newMapValues); - - newMapValues.ForEach(x => mapValues[x.Key] = x.Value); - - var members = redis.GetAllEntriesFromHash(Hash); - Assert.That(members, Is.EquivalentTo(mapValues)); - } - } + [Test] + public void Can_Add_to_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => hash.Add(k, v)); + + var members = redis.GetAllEntriesFromHash(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public void Can_Clear_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => hash.Add(k, v)); + + Assert.That(hash.Count, Is.EqualTo(mapValues.Count)); + + hash.Clear(); + + Assert.That(hash.Count, Is.EqualTo(0)); + } + + [Test] + public void Can_Test_Contains_in_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => hash.Add(k, v)); + + var existingMember = mapValues.First().Key; + var nonExistingMember = existingMember + "notexists"; + + Assert.That(hash.ContainsKey(existingMember), Is.True); + Assert.That(hash.ContainsKey(nonExistingMember), Is.False); + } + + [Test] + public void Can_Remove_value_from_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = 
CreateMap(); + mapValues.ForEach((k, v) => hash.Add(k, v)); + + var firstKey = mapValues.First().Key; + mapValues.Remove(firstKey); + hash.Remove(firstKey); + + var members = redis.GetAllEntriesFromHash(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + private static Dictionary ToStringMap(Dictionary stringIntMap) + { + var map = new Dictionary(); + foreach (var kvp in stringIntMap) + { + map[kvp.Key] = kvp.Value.ToString(); + } + return map; + } + + [Test] + public void Can_SetItemInHashIfNotExists() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var existingMember = mapValues.First().Key; + var nonExistingMember = existingMember + "notexists"; + + var lastValue = mapValues.Last().Value; + + redis.SetEntryInHashIfNotExists(Hash, existingMember, lastValue); + redis.SetEntryInHashIfNotExists(Hash, nonExistingMember, lastValue); + + mapValues[nonExistingMember] = lastValue; + + var members = redis.GetAllEntriesFromHash(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public void Can_SetRangeInHash() + { + var mapValues = CreateMap(); + mapValues.ForEach((k, v) => redis.SetEntryInHash(Hash, k, v)); + + var newMapValues = CreateMap2(); + + redis.SetRangeInHash(Hash, newMapValues); + + newMapValues.Each(x => mapValues[x.Key] = x.Value); + + var members = redis.GetAllEntriesFromHash(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.Async.cs new file mode 100644 index 00000000..0e7d7216 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.Async.cs @@ -0,0 +1,80 @@ +using System; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Tests.Support; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisClientHashTestsModelWithFieldsOfDifferentTypesAsync + : RedisClientHashTestsBaseAsync + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientHashTestsStringAsync + : RedisClientHashTestsBaseAsync + { + private readonly IModelFactory factory = new BuiltInsFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientHashTestsShipperAsync + : RedisClientHashTestsBaseAsync + { + private readonly IModelFactory factory = new ShipperFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientHashTestsIntAsync + : RedisClientHashTestsBaseAsync + { + private readonly IModelFactory factory = new IntFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientHashTestsCustomTypeAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + //public class RedisClientHashTestsDateTimeAsync + // : RedisClientHashTestsBaseAsync + //{ + // private readonly IModelFactory factory = new DateTimeFactory(); + + // protected override IModelFactory Factory + // { + // get { return factory; } + // } + 
//} + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.cs index 3550b2a3..4e3b1518 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.cs @@ -6,75 +6,75 @@ namespace ServiceStack.Redis.Tests.Generic { [TestFixture] - public class RedisClientHashTestsModelWithFieldsOfDifferentTypes - : RedisClientHashTestsBase - { - private readonly IModelFactory factory = - new ModelWithFieldsOfDifferentTypesFactory(); + public class RedisClientHashTestsModelWithFieldsOfDifferentTypes + : RedisClientHashTestsBase + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] - public class RedisClientHashTestsString - : RedisClientHashTestsBase - { - private readonly IModelFactory factory = new BuiltInsFactory(); + public class RedisClientHashTestsString + : RedisClientHashTestsBase + { + private readonly IModelFactory factory = new BuiltInsFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] - public class RedisClientHashTestsShipper - : RedisClientHashTestsBase - { - private readonly IModelFactory factory = new ShipperFactory(); + public class RedisClientHashTestsShipper + : RedisClientHashTestsBase + { + private readonly IModelFactory factory = new ShipperFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] - public class RedisClientHashTestsInt - : RedisClientHashTestsBase - { - private readonly IModelFactory factory = new IntFactory(); + public class RedisClientHashTestsInt + : RedisClientHashTestsBase + { + private readonly IModelFactory factory = new IntFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] - public class RedisClientHashTestsCustomType - : RedisClientSetTestsBase - { - private readonly IModelFactory factory = new CustomTypeFactory(); + public class RedisClientHashTestsCustomType + : RedisClientSetTestsBase + { + private readonly IModelFactory factory = new CustomTypeFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } - //public class RedisClientHashTestsDateTime - // : RedisClientHashTestsBase - //{ - // private readonly IModelFactory factory = new DateTimeFactory(); + //public class RedisClientHashTestsDateTime + // : RedisClientHashTestsBase + //{ + // private readonly IModelFactory factory = new DateTimeFactory(); - // protected override IModelFactory Factory - // { - // get { return factory; } - // } - //} + // protected override IModelFactory Factory + // { + // get { return factory; } + // } + //} } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.Async.cs new file mode 100644 index 00000000..42f92e1e 
--- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.Async.cs @@ -0,0 +1,59 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Tests.Support; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Async")] + public class RedisClientListTestExtraAsync + { + const string ListId = "testlist"; + // const string ListId2 = "testlist2"; + private IRedisListAsync List; + // private IRedisListAsync List2; + + + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected IModelFactory Factory { get { return factory; } } + + private IRedisClientAsync client; + private IRedisTypedClientAsync redis; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + await client.FlushAllAsync(); + + redis = client.As(); + + List = redis.Lists[ListId]; + // List2 = redis.Lists[ListId2]; + } + + [Test] + public async Task Can_Remove_value_from_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + var equalItem = new CustomType() { CustomId = 4 }; + storeMembers.Remove(equalItem); + await List.RemoveAsync(equalItem); + + var members = await List.ToListAsync(); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + } +} diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.cs index a8d78883..bedf5473 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.cs @@ -1,5 +1,6 @@ -using NUnit.Framework; -using ServiceStack.Common.Extensions; +using System.Linq; +using NUnit.Framework; +using ServiceStack.Common; using ServiceStack.Common.Tests.Models; using ServiceStack.Redis.Generic; using ServiceStack.Redis.Tests.Support; @@ -34,7 +35,7 @@ public void SetUp() client = new RedisClient(TestConfig.SingleHost); client.FlushAll(); - redis = client.GetTypedClient(); + redis = client.As(); List = redis.Lists[ListId]; List2 = redis.Lists[ListId2]; @@ -50,7 +51,7 @@ public void Can_Remove_value_from_IList() storeMembers.Remove(equalItem); List.Remove(equalItem); - var members = List.ToList(); + var members = List.ToList(); Factory.AssertListsAreEqual(members, storeMembers); } diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.Async.cs new file mode 100644 index 00000000..58e42b5d --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.Async.cs @@ -0,0 +1,331 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Async")] + public abstract class RedisClientListTestsBaseAsync + { + const string ListId = "testlist"; + const string ListId2 = "testlist2"; + private IRedisListAsync List; + private IRedisListAsync List2; + + protected abstract IModelFactory Factory { get; } + + private IRedisClientAsync client; + private IRedisTypedClientAsync redis; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await 
client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + await client.FlushAllAsync(); + + redis = client.As(); + + List = redis.Lists[ListId]; + List2 = redis.Lists[ListId2]; + } + + [Test] + public async Task Can_AddToList_and_GetAllFromList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var members = await redis.GetAllItemsFromListAsync(List); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_GetListCount() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var listCount = await redis.GetListCountAsync(List); + + Assert.That(listCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Can_GetItemFromList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var storeMember3 = storeMembers[2]; + var item3 = await redis.GetItemFromListAsync(List, 2); + + Factory.AssertIsEqual(item3, storeMember3); + } + + [Test] + public async Task Can_SetItemInList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + storeMembers[2] = Factory.NonExistingValue; + await redis.SetItemInListAsync(List, 2, Factory.NonExistingValue); + + var members = await redis.GetAllItemsFromListAsync(List); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_PopFromList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var lastValue = await redis.PopItemFromListAsync(List); + + Factory.AssertIsEqual(lastValue, storeMembers[storeMembers.Count - 1]); + } + + [Test] + public async Task Can_BlockingDequeueItemFromList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.EnqueueItemOnListAsync(List, x)); + + var item1 = await redis.BlockingDequeueItemFromListAsync(List, new TimeSpan(0, 0, 1)); + + Factory.AssertIsEqual(item1, (T)storeMembers.First()); + } + + [Test] + public async Task Can_BlockingDequeueItemFromList_Timeout() + { + var item1 = await redis.BlockingDequeueItemFromListAsync(List, new TimeSpan(0, 0, 1)); + Assert.AreEqual(item1, default(T)); + } + + [Test] + public async Task Can_DequeueFromList() + { + + var queue = new Queue(); + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => queue.Enqueue(x)); + await storeMembers.ForEachAsync(x => redis.EnqueueItemOnListAsync(List, x)); + + var item1 = await redis.DequeueItemFromListAsync(List); + + Factory.AssertIsEqual(item1, queue.Dequeue()); + } + + [Test] + public async Task PopAndPushSameAsDequeue() + { + var queue = new Queue(); + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => queue.Enqueue(x)); + await storeMembers.ForEachAsync(x => redis.EnqueueItemOnListAsync(List, x)); + + var item1 = await redis.PopAndPushItemBetweenListsAsync(List, List2); + Assert.That(item1, Is.EqualTo(queue.Dequeue())); + } + + [Test] + public async Task Can_ClearList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.EnqueueItemOnListAsync(List, x)); + + var count = (await redis.GetAllItemsFromListAsync(List)).Count; + Assert.That(count, Is.EqualTo(storeMembers.Count)); + + await redis.RemoveAllFromListAsync(List); + count = (await 
redis.GetAllItemsFromListAsync(List)).Count; + Assert.That(count, Is.EqualTo(0)); + + } + + [Test] + public async Task Can_ClearListWithOneItem() + { + var storeMembers = Factory.CreateList(); + await redis.EnqueueItemOnListAsync(List, storeMembers[0]); + + var count = (await redis.GetAllItemsFromListAsync(List)).Count; + Assert.That(count, Is.EqualTo(1)); + + await redis.RemoveAllFromListAsync(List); + count = (await redis.GetAllItemsFromListAsync(List)).Count; + Assert.That(count, Is.EqualTo(0)); + } + + [Test] + public async Task Can_MoveBetweenLists() + { + var list1Members = Factory.CreateList(); + var list2Members = Factory.CreateList2(); + var lastItem = list1Members[list1Members.Count - 1]; + + await list1Members.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + await list2Members.ForEachAsync(x => redis.AddItemToListAsync(List2, x)); + + list1Members.Remove(lastItem); + list2Members.Insert(0, lastItem); + await redis.PopAndPushItemBetweenListsAsync(List, List2); + + var readList1 = await redis.GetAllItemsFromListAsync(List); + var readList2 = await redis.GetAllItemsFromListAsync(List2); + + Factory.AssertListsAreEqual(readList1, list1Members); + Factory.AssertListsAreEqual(readList2, list2Members); + } + + + [Test] + public async Task Can_enumerate_small_list() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var readMembers = new List(); + await foreach (var item in redis.Lists[ListId]) + { + readMembers.Add(item); + } + Factory.AssertListsAreEqual(readMembers, storeMembers); + } + + [Test] + public async Task Can_enumerate_large_list() + { + if (TestConfig.IgnoreLongTests) return; + + const int listSize = 2500; + + await listSize.TimesAsync(x => redis.AddItemToListAsync(List, Factory.CreateInstance(x))); + + var i = 0; + await foreach (var item in List) + { + Factory.AssertIsEqual(item, Factory.CreateInstance(i++)); + } + } + + [Test] + public async Task Can_Add_to_IList() + { + var storeMembers = Factory.CreateList(); + var list = redis.Lists[ListId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + var members = await list.ToListAsync(); + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_Clear_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + Assert.That(await List.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await List.ClearAsync(); + + Assert.That(await List.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + Assert.That(await List.ContainsAsync(Factory.ExistingValue), Is.True); + Assert.That(await List.ContainsAsync(Factory.NonExistingValue), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + storeMembers.Remove(Factory.ExistingValue); + await List.RemoveAsync(Factory.ExistingValue); + + var members = await List.ToListAsync(); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_RemoveAt_value_from_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + storeMembers.RemoveAt(2); + await List.RemoveAtAsync(2); + + var members = await List.ToListAsync(); + + 
Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_get_default_index_from_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + for (var i = 0; i < storeMembers.Count; i++) + { + Factory.AssertIsEqual(await List.ElementAtAsync(i), storeMembers[i]); + } + } + + [Test] + public async Task Can_test_for_IndexOf_in_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + foreach (var item in storeMembers) + { + Assert.That(await List.IndexOfAsync(item), Is.EqualTo(storeMembers.IndexOf(item))); + } + } + + + [Test] + public async Task Can_GetRangeFromList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + //in SetUp(): List = redis.Lists["testlist"]; + //alias for: redis.GetRangeFromList(redis.Lists["testlist"], 1, 3); + var range = await List.GetRangeAsync(1, 3); + var expected = storeMembers.Skip(1).Take(3).ToList(); + + //Uncomment to view list contents + //Debug.WriteLine(range.Dump()); + //Debug.WriteLine(expected.Dump()); + + Factory.AssertListsAreEqual(range, expected); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.cs index b48c6b62..62b9ba04 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.cs @@ -2,112 +2,113 @@ using System.Collections.Generic; using System.Linq; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; using ServiceStack.Common.Tests.Models; using ServiceStack.Redis.Generic; using ServiceStack.Text; +using ServiceStack.Redis.Tests.Support; namespace ServiceStack.Redis.Tests.Generic { - [TestFixture] - public abstract class RedisClientListTestsBase - { - const string ListId = "testlist"; - const string ListId2 = "testlist2"; - private IRedisList List; - private IRedisList List2; + [TestFixture] + public abstract class RedisClientListTestsBase + { + const string ListId = "testlist"; + const string ListId2 = "testlist2"; + private IRedisList List; + private IRedisList List2; - protected abstract IModelFactory Factory { get; } + protected abstract IModelFactory Factory { get; } - private RedisClient client; - private IRedisTypedClient redis; + private RedisClient client; + private IRedisTypedClient redis; - [SetUp] - public void SetUp() - { - if (client != null) - { - client.Dispose(); - client = null; - } - client = new RedisClient(TestConfig.SingleHost); - client.FlushAll(); - - redis = client.GetTypedClient(); - - List = redis.Lists[ListId]; - List2 = redis.Lists[ListId2]; - } + [SetUp] + public void SetUp() + { + if (client != null) + { + client.Dispose(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + client.FlushAll(); + + redis = client.As(); + + List = redis.Lists[ListId]; + List2 = redis.Lists[ListId2]; + } - [Test] - public void Can_AddToList_and_GetAllFromList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToList(List, x)); + [Test] + public void Can_AddToList_and_GetAllFromList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToList(List, x)); - var members = redis.GetAllItemsFromList(List); + var members = redis.GetAllItemsFromList(List); - 
Factory.AssertListsAreEqual(members, storeMembers); - } + Factory.AssertListsAreEqual(members, storeMembers); + } - [Test] - public void Can_GetListCount() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToList(List, x)); + [Test] + public void Can_GetListCount() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToList(List, x)); - var listCount = redis.GetListCount(List); + var listCount = redis.GetListCount(List); - Assert.That(listCount, Is.EqualTo(storeMembers.Count)); - } + Assert.That(listCount, Is.EqualTo(storeMembers.Count)); + } - [Test] - public void Can_GetItemFromList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToList(List, x)); + [Test] + public void Can_GetItemFromList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToList(List, x)); - var storeMember3 = storeMembers[2]; - var item3 = redis.GetItemFromList(List, 2); + var storeMember3 = storeMembers[2]; + var item3 = redis.GetItemFromList(List, 2); - Factory.AssertIsEqual(item3, storeMember3); - } + Factory.AssertIsEqual(item3, storeMember3); + } - [Test] - public void Can_SetItemInList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToList(List, x)); + [Test] + public void Can_SetItemInList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToList(List, x)); - storeMembers[2] = Factory.NonExistingValue; - redis.SetItemInList(List, 2, Factory.NonExistingValue); + storeMembers[2] = Factory.NonExistingValue; + redis.SetItemInList(List, 2, Factory.NonExistingValue); - var members = redis.GetAllItemsFromList(List); + var members = redis.GetAllItemsFromList(List); - Factory.AssertListsAreEqual(members, storeMembers); - } + Factory.AssertListsAreEqual(members, storeMembers); + } - [Test] - public void Can_PopFromList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToList(List, x)); + [Test] + public void Can_PopFromList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToList(List, x)); - var lastValue = redis.PopItemFromList(List); + var lastValue = redis.PopItemFromList(List); - Factory.AssertIsEqual(lastValue, storeMembers[storeMembers.Count - 1]); - } + Factory.AssertIsEqual(lastValue, storeMembers[storeMembers.Count - 1]); + } - [Test] + [Test] public void Can_BlockingDequeueItemFromList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToList(List, x)); + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.EnqueueItemOnList(List, x)); var item1 = redis.BlockingDequeueItemFromList(List, new TimeSpan(0, 0, 1)); - Factory.AssertIsEqual(item1, (T)storeMembers.First()); - } + Factory.AssertIsEqual(item1, (T)storeMembers.First()); + } [Test] public void Can_BlockingDequeueItemFromList_Timeout() @@ -171,162 +172,162 @@ public void Can_ClearListWithOneItem() Assert.That(count, Is.EqualTo(0)); } - [Test] - public void Can_MoveBetweenLists() - { - var list1Members = Factory.CreateList(); - var list2Members = Factory.CreateList2(); - var lastItem = list1Members[list1Members.Count - 1]; - - list1Members.ForEach(x => redis.AddItemToList(List, x)); - list2Members.ForEach(x => redis.AddItemToList(List2, x)); + [Test] + public void Can_MoveBetweenLists() + { + var list1Members = Factory.CreateList(); + var list2Members = Factory.CreateList2(); + var 
lastItem = list1Members[list1Members.Count - 1]; + + list1Members.ForEach(x => redis.AddItemToList(List, x)); + list2Members.ForEach(x => redis.AddItemToList(List2, x)); - list1Members.Remove(lastItem); - list2Members.Insert(0, lastItem); - redis.PopAndPushItemBetweenLists(List, List2); + list1Members.Remove(lastItem); + list2Members.Insert(0, lastItem); + redis.PopAndPushItemBetweenLists(List, List2); - var readList1 = redis.GetAllItemsFromList(List); - var readList2 = redis.GetAllItemsFromList(List2); + var readList1 = redis.GetAllItemsFromList(List); + var readList2 = redis.GetAllItemsFromList(List2); - Factory.AssertListsAreEqual(readList1, list1Members); - Factory.AssertListsAreEqual(readList2, list2Members); - } + Factory.AssertListsAreEqual(readList1, list1Members); + Factory.AssertListsAreEqual(readList2, list2Members); + } + + + [Test] + public void Can_enumerate_small_list() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToList(List, x)); + + var readMembers = new List(); + foreach (var item in redis.Lists[ListId]) + { + readMembers.Add(item); + } + Factory.AssertListsAreEqual(readMembers, storeMembers); + } + [Test] + public void Can_enumerate_large_list() + { + if (TestConfig.IgnoreLongTests) return; + + const int listSize = 2500; + + listSize.Times(x => redis.AddItemToList(List, Factory.CreateInstance(x))); + + var i = 0; + foreach (var item in List) + { + Factory.AssertIsEqual(item, Factory.CreateInstance(i++)); + } + } + + [Test] + public void Can_Add_to_IList() + { + var storeMembers = Factory.CreateList(); + var list = redis.Lists[ListId]; + storeMembers.ForEach(list.Add); + + var members = list.ToList(); + Factory.AssertListsAreEqual(members, storeMembers); + } - [Test] - public void Can_enumerate_small_list() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToList(List, x)); - - var readMembers = new List(); - foreach (var item in redis.Lists[ListId]) - { - readMembers.Add(item); - } - Factory.AssertListsAreEqual(readMembers, storeMembers); - } - - [Test] - public void Can_enumerate_large_list() - { - if (TestConfig.IgnoreLongTests) return; - - const int listSize = 2500; - - listSize.Times(x => redis.AddItemToList(List, Factory.CreateInstance(x))); - - var i = 0; - foreach (var item in List) - { - Factory.AssertIsEqual(item, Factory.CreateInstance(i++)); - } - } - - [Test] - public void Can_Add_to_IList() - { - var storeMembers = Factory.CreateList(); - var list = redis.Lists[ListId]; - storeMembers.ForEach(list.Add); - - var members = list.ToList(); - Factory.AssertListsAreEqual(members, storeMembers); - } - - [Test] - public void Can_Clear_IList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(List.Add); - - Assert.That(List.Count, Is.EqualTo(storeMembers.Count)); - - List.Clear(); - - Assert.That(List.Count, Is.EqualTo(0)); - } - - [Test] - public void Can_Test_Contains_in_IList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(List.Add); - - Assert.That(List.Contains(Factory.ExistingValue), Is.True); - Assert.That(List.Contains(Factory.NonExistingValue), Is.False); - } - - [Test] - public void Can_Remove_value_from_IList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(List.Add); - - storeMembers.Remove(Factory.ExistingValue); - List.Remove(Factory.ExistingValue); - - var members = List.ToList(); - - Factory.AssertListsAreEqual(members, storeMembers); - } - - [Test] - public void Can_RemoveAt_value_from_IList() - { - var 
storeMembers = Factory.CreateList(); - storeMembers.ForEach(List.Add); - - storeMembers.RemoveAt(2); - List.RemoveAt(2); - - var members = List.ToList(); - - Factory.AssertListsAreEqual(members, storeMembers); - } - - [Test] - public void Can_get_default_index_from_IList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(List.Add); - - for (var i=0; i < storeMembers.Count; i++) - { - Factory.AssertIsEqual(List[i], storeMembers[i]); - } - } - - [Test] - public void Can_test_for_IndexOf_in_IList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(List.Add); - - foreach (var item in storeMembers) - { - Assert.That(List.IndexOf(item), Is.EqualTo(storeMembers.IndexOf(item))); - } - } - - - [Test] - public void Can_GetRangeFromList() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToList(List, x)); - - //in SetUp(): List = redis.Lists["testlist"]; - //alias for: redis.GetRangeFromList(redis.Lists["testlist"], 1, 3); - var range = List.GetRange(1, 3); - var expected = storeMembers.Skip(1).Take(3).ToList(); - - //Uncomment to view list contents - //Debug.WriteLine(range.Dump()); - //Debug.WriteLine(expected.Dump()); - - Factory.AssertListsAreEqual(range, expected); - } + [Test] + public void Can_Clear_IList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(List.Add); + + Assert.That(List.Count, Is.EqualTo(storeMembers.Count)); + + List.Clear(); + + Assert.That(List.Count, Is.EqualTo(0)); + } + + [Test] + public void Can_Test_Contains_in_IList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(List.Add); + + Assert.That(List.Contains(Factory.ExistingValue), Is.True); + Assert.That(List.Contains(Factory.NonExistingValue), Is.False); + } + + [Test] + public void Can_Remove_value_from_IList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(List.Add); + + storeMembers.Remove(Factory.ExistingValue); + List.Remove(Factory.ExistingValue); + + var members = List.ToList(); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public void Can_RemoveAt_value_from_IList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(List.Add); + + storeMembers.RemoveAt(2); + List.RemoveAt(2); + + var members = List.ToList(); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public void Can_get_default_index_from_IList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(List.Add); + + for (var i = 0; i < storeMembers.Count; i++) + { + Factory.AssertIsEqual(List[i], storeMembers[i]); + } + } + + [Test] + public void Can_test_for_IndexOf_in_IList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(List.Add); + + foreach (var item in storeMembers) + { + Assert.That(List.IndexOf(item), Is.EqualTo(storeMembers.IndexOf(item))); + } + } + + + [Test] + public void Can_GetRangeFromList() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToList(List, x)); + + //in SetUp(): List = redis.Lists["testlist"]; + //alias for: redis.GetRangeFromList(redis.Lists["testlist"], 1, 3); + var range = List.GetRange(1, 3); + var expected = storeMembers.Skip(1).Take(3).ToList(); + + //Uncomment to view list contents + //Debug.WriteLine(range.Dump()); + //Debug.WriteLine(expected.Dump()); + + Factory.AssertListsAreEqual(range, expected); + } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.Async.cs 
b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.Async.cs new file mode 100644 index 00000000..00d6e80f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.Async.cs @@ -0,0 +1,91 @@ +using System; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Tests.Support; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisClientListTestsModelWithFieldsOfDifferentTypesAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientListTestsStringAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = new BuiltInsFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientListTestsShipperAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = new ShipperFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientListTestsIntAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = new IntFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientListTestsCustomTypeAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientlistTestCustomType_FailingAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + //public class RedisClientListTestsDateTimeAsync + // : RedisClientListTestsBaseAsync + //{ + // private readonly IModelFactory factory = new DateTimeFactory(); + + // protected override IModelFactory Factory + // { + // get { return factory; } + // } + //} +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.cs index 2766285c..5d8e0474 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.cs @@ -5,65 +5,66 @@ namespace ServiceStack.Redis.Tests.Generic { - public class RedisClientListTestsModelWithFieldsOfDifferentTypes - : RedisClientListTestsBase - { - private readonly IModelFactory factory = - new ModelWithFieldsOfDifferentTypesFactory(); + // TODO: error, missing fixture? 
+ public class RedisClientListTestsModelWithFieldsOfDifferentTypes + : RedisClientListTestsBase + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] - public class RedisClientListTestsString - : RedisClientListTestsBase - { - private readonly IModelFactory factory = new BuiltInsFactory(); + public class RedisClientListTestsString + : RedisClientListTestsBase + { + private readonly IModelFactory factory = new BuiltInsFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] - public class RedisClientListTestsShipper - : RedisClientListTestsBase - { - private readonly IModelFactory factory = new ShipperFactory(); + public class RedisClientListTestsShipper + : RedisClientListTestsBase + { + private readonly IModelFactory factory = new ShipperFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] - public class RedisClientListTestsInt - : RedisClientListTestsBase - { - private readonly IModelFactory factory = new IntFactory(); + public class RedisClientListTestsInt + : RedisClientListTestsBase + { + private readonly IModelFactory factory = new IntFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] - public class RedisClientListTestsCustomType - : RedisClientSetTestsBase - { - private readonly IModelFactory factory = new CustomTypeFactory(); + public class RedisClientListTestsCustomType + : RedisClientSetTestsBase + { + private readonly IModelFactory factory = new CustomTypeFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] public class RedisClientlistTestCustomType_Failing @@ -77,14 +78,14 @@ protected override IModelFactory Factory } } - //public class RedisClientListTestsDateTime - // : RedisClientListTestsBase - //{ - // private readonly IModelFactory factory = new DateTimeFactory(); + //public class RedisClientListTestsDateTime + // : RedisClientListTestsBase + //{ + // private readonly IModelFactory factory = new DateTimeFactory(); - // protected override IModelFactory Factory - // { - // get { return factory; } - // } - //} + // protected override IModelFactory Factory + // { + // get { return factory; } + // } + //} } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.Async.cs new file mode 100644 index 00000000..7f3028fd --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.Async.cs @@ -0,0 +1,344 @@ +using System.Collections.Generic; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using System.Linq; +using System.Threading.Tasks; +using System; + +namespace ServiceStack.Redis.Tests.Generic +{ + [Category("Async")] + public abstract class RedisClientSetTestsBaseAsync + { + private const string SetId = "testset"; + private const string SetId2 = 
"testset2"; + private const string SetId3 = "testset3"; + protected abstract IModelFactory Factory { get; } + + private IRedisClientAsync client; + private IRedisTypedClientAsync redis; + private IRedisSetAsync Set; + private IRedisSetAsync Set2; + private IRedisSetAsync Set3; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + await client.FlushAllAsync(); + + redis = client.As(); + + Set = redis.Sets[SetId]; + Set2 = redis.Sets[SetId2]; + Set3 = redis.Sets[SetId3]; + } + + [Test] + public async Task Can_AddToSet_and_GetAllFromSet() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var members = await redis.GetAllItemsFromSetAsync(Set); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_RemoveFromSet() + { + var storeMembers = Factory.CreateList(); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + await redis.RemoveItemFromSetAsync(Set, Factory.ExistingValue); + + storeMembers.Remove(Factory.ExistingValue); + + var members = await redis.GetAllItemsFromSetAsync(Set); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_PopFromSet() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var member = await redis.PopItemFromSetAsync(Set); + + Assert.That(storeMembers.Contains(member), Is.True); + } + + [Test] + public async Task Can_MoveBetweenSets() + { + var fromSetMembers = Factory.CreateList(); + var toSetMembers = Factory.CreateList2(); + + await fromSetMembers.ForEachAsync(x => redis.AddItemToSetAsync(Set, x)); + await toSetMembers.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + await redis.MoveBetweenSetsAsync(Set, Set2, Factory.ExistingValue); + + fromSetMembers.Remove(Factory.ExistingValue); + toSetMembers.Add(Factory.ExistingValue); + + var readFromSetId = await redis.GetAllItemsFromSetAsync(Set); + var readToSetId = await redis.GetAllItemsFromSetAsync(Set2); + + Assert.That(readFromSetId, Is.EquivalentTo(fromSetMembers)); + Assert.That(readToSetId, Is.EquivalentTo(toSetMembers)); + } + + [Test] + public async Task Can_GetSetCount() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var setCount = await redis.GetSetCountAsync(Set); + + Assert.That(setCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Does_SetContainsValue() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + Assert.That(await redis.SetContainsItemAsync(Set, Factory.ExistingValue), Is.True); + Assert.That(await redis.SetContainsItemAsync(Set, Factory.NonExistingValue), Is.False); + } + + [Test] + public async Task Can_IntersectBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); + + storeMembers.Add(storeMembers2.First()); + storeMembers2.Add(storeMembers.First()); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + var intersectingMembers = await redis.GetIntersectFromSetsAsync(new[] { Set, Set2 }); + + var intersect = (await 
Set.ToListAsync()).Intersect((await Set2.ToListAsync())).ToList(); + + Assert.That(intersectingMembers, Is.EquivalentTo(intersect)); + } + + [Test] + public async Task Can_Store_IntersectBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + await redis.StoreIntersectFromSetsAsync(Set3, new[] { Set, Set2 }); + + var intersect = (await Set.ToListAsync()).Intersect(await Set2.ToListAsync()).ToList(); + + Assert.That(await Set3.ToListAsync(), Is.EquivalentTo(intersect)); + } + + [Test] + public async Task Can_UnionBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + var unionMembers = await redis.GetUnionFromSetsAsync(new[] { Set, Set2 }); + + var union = (await Set.ToListAsync()).Union(await Set2.ToListAsync()).ToList(); + + Assert.That(unionMembers, Is.EquivalentTo(union)); + } + + [Test] + public async Task Can_Store_UnionBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + await redis.StoreUnionFromSetsAsync(Set3, new[] { Set, Set2 }); + + var union = (await Set.ToListAsync()).Union((await Set2.ToListAsync())).ToList(); + + Assert.That(await Set3.ToListAsync(), Is.EquivalentTo(union)); + } + + [Test] + public async Task Can_DiffBetweenSets() + { + var storeMembers = Factory.CreateList(); + storeMembers.Add(Factory.CreateInstance(1)); + + var storeMembers2 = Factory.CreateList2(); + storeMembers2.Insert(0, Factory.CreateInstance(4)); + + var storeMembers3 = new List { + Factory.CreateInstance(1), + Factory.CreateInstance(5), + Factory.CreateInstance(7), + Factory.CreateInstance(11), + }; + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + await storeMembers3.ForEachAsync(x => redis.AddItemToSetAsync(Set3, x)); + + var diffMembers = await redis.GetDifferencesFromSetAsync(Set, new[] { Set2, Set3 }); + + Assert.That(diffMembers, Is.EquivalentTo( + new List { Factory.CreateInstance(2), Factory.CreateInstance(3) })); + } + + [Test] + public async Task Can_Store_DiffBetweenSets() + { + var storeMembers = Factory.CreateList(); + storeMembers.Add(Factory.CreateInstance(1)); + + var storeMembers2 = Factory.CreateList2(); + storeMembers2.Insert(0, Factory.CreateInstance(4)); + + var storeMembers3 = new List { + Factory.CreateInstance(1), + Factory.CreateInstance(5), + Factory.CreateInstance(7), + Factory.CreateInstance(11), + }; + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + await storeMembers3.ForEachAsync(x => redis.AddItemToSetAsync(Set3, x)); + + var storeSet = redis.Sets["testdiffsetstore"]; + + await redis.StoreDifferencesFromSetAsync(storeSet, Set, new[] { Set2, Set3 }); + + Assert.That(await storeSet.ToListAsync(), Is.EquivalentTo( + new List { Factory.CreateInstance(2), Factory.CreateInstance(3) })); + + } + + [Test] + 
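+        // A random set member is non-deterministic, so the test below only asserts that the
+        // returned entry is one of the stored members rather than comparing a specific value.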
public async Task Can_GetRandomEntryFromSet() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var randomEntry = await redis.GetRandomItemFromSetAsync(Set); + + Assert.That(storeMembers.Contains(randomEntry), Is.True); + } + + + [Test] + public async Task Can_enumerate_small_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var members = new List(); + await foreach (var item in Set) + { + members.Add(item); + } + + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_enumerate_large_ICollection_Set() + { + if (TestConfig.IgnoreLongTests) return; + + const int setSize = 2500; + + var storeMembers = new List(); + await setSize.TimesAsync(async x => + { + await redis.AddItemToSetAsync(Set, Factory.CreateInstance(x)); + storeMembers.Add(Factory.CreateInstance(x)); + }); + + var members = new List(); + await foreach (var item in Set) + { + members.Add(item); + } + members.Sort((x, y) => x.GetId().ToString().CompareTo(y.GetId().ToString())); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Add_to_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var members = await Set.ToListAsync(); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Clear_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + Assert.That(await Set.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await Set.ClearAsync(); + + Assert.That(await Set.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + Assert.That(await Set.ContainsAsync(Factory.ExistingValue), Is.True); + Assert.That(await Set.ContainsAsync(Factory.NonExistingValue), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + storeMembers.Remove(Factory.ExistingValue); + await Set.RemoveAsync(Factory.ExistingValue); + + var members = await Set.ToListAsync(); + + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.cs index c0a6d040..8fb23eb5 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.cs @@ -1,342 +1,341 @@ using System.Collections.Generic; using NUnit.Framework; -using ServiceStack.Common.Extensions; using ServiceStack.Common.Tests.Models; -using ServiceStack.Common.Utils; using ServiceStack.Redis.Generic; using System.Linq; namespace ServiceStack.Redis.Tests.Generic { - public abstract class RedisClientSetTestsBase - { - private const string SetId = "testset"; - private const string SetId2 = 
"testset2"; - private const string SetId3 = "testset3"; - protected abstract IModelFactory Factory { get; } - - private RedisClient client; - private IRedisTypedClient redis; - private IRedisSet Set; - private IRedisSet Set2; - private IRedisSet Set3; - - [SetUp] - public void SetUp() - { - if (client != null) - { - client.Dispose(); - client = null; - } - client = new RedisClient(TestConfig.SingleHost); - client.FlushAll(); - - redis = client.GetTypedClient(); + public abstract class RedisClientSetTestsBase + { + private const string SetId = "testset"; + private const string SetId2 = "testset2"; + private const string SetId3 = "testset3"; + protected abstract IModelFactory Factory { get; } + + private RedisClient client; + private IRedisTypedClient redis; + private IRedisSet Set; + private IRedisSet Set2; + private IRedisSet Set3; + + [SetUp] + public void SetUp() + { + if (client != null) + { + client.Dispose(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + client.FlushAll(); + + redis = client.As(); - Set = redis.Sets[SetId]; - Set2 = redis.Sets[SetId2]; - Set3 = redis.Sets[SetId3]; - } + Set = redis.Sets[SetId]; + Set2 = redis.Sets[SetId2]; + Set3 = redis.Sets[SetId3]; + } - [Test] - public void Can_AddToSet_and_GetAllFromSet() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + [Test] + public void Can_AddToSet_and_GetAllFromSet() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - var members = redis.GetAllItemsFromSet(Set); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } + var members = redis.GetAllItemsFromSet(Set); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } - [Test] - public void Can_RemoveFromSet() - { - var storeMembers = Factory.CreateList(); + [Test] + public void Can_RemoveFromSet() + { + var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - redis.RemoveItemFromSet(Set, Factory.ExistingValue); + redis.RemoveItemFromSet(Set, Factory.ExistingValue); - storeMembers.Remove(Factory.ExistingValue); + storeMembers.Remove(Factory.ExistingValue); - var members = redis.GetAllItemsFromSet(Set); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } + var members = redis.GetAllItemsFromSet(Set); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } - [Test] - public void Can_PopFromSet() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + [Test] + public void Can_PopFromSet() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - var member = redis.PopItemFromSet(Set); + var member = redis.PopItemFromSet(Set); - Assert.That(storeMembers.Contains(member), Is.True); - } + Assert.That(storeMembers.Contains(member), Is.True); + } - [Test] - public void Can_MoveBetweenSets() - { - var fromSetMembers = Factory.CreateList(); - var toSetMembers = Factory.CreateList2(); + [Test] + public void Can_MoveBetweenSets() + { + var fromSetMembers = Factory.CreateList(); + var toSetMembers = Factory.CreateList2(); - fromSetMembers.ForEach(x => redis.AddItemToSet(Set, x)); - toSetMembers.ForEach(x => redis.AddItemToSet(Set2, x)); + fromSetMembers.ForEach(x => redis.AddItemToSet(Set, x)); + toSetMembers.ForEach(x => redis.AddItemToSet(Set2, x)); - redis.MoveBetweenSets(Set, Set2, Factory.ExistingValue); + redis.MoveBetweenSets(Set, 
Set2, Factory.ExistingValue); - fromSetMembers.Remove(Factory.ExistingValue); - toSetMembers.Add(Factory.ExistingValue); + fromSetMembers.Remove(Factory.ExistingValue); + toSetMembers.Add(Factory.ExistingValue); - var readFromSetId = redis.GetAllItemsFromSet(Set); - var readToSetId = redis.GetAllItemsFromSet(Set2); + var readFromSetId = redis.GetAllItemsFromSet(Set); + var readToSetId = redis.GetAllItemsFromSet(Set2); - Assert.That(readFromSetId, Is.EquivalentTo(fromSetMembers)); - Assert.That(readToSetId, Is.EquivalentTo(toSetMembers)); - } + Assert.That(readFromSetId, Is.EquivalentTo(fromSetMembers)); + Assert.That(readToSetId, Is.EquivalentTo(toSetMembers)); + } - [Test] - public void Can_GetSetCount() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + [Test] + public void Can_GetSetCount() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - var setCount = redis.GetSetCount(Set); + var setCount = redis.GetSetCount(Set); - Assert.That(setCount, Is.EqualTo(storeMembers.Count)); - } + Assert.That(setCount, Is.EqualTo(storeMembers.Count)); + } - [Test] - public void Does_SetContainsValue() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + [Test] + public void Does_SetContainsValue() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - Assert.That(redis.SetContainsItem(Set, Factory.ExistingValue), Is.True); - Assert.That(redis.SetContainsItem(Set, Factory.NonExistingValue), Is.False); - } + Assert.That(redis.SetContainsItem(Set, Factory.ExistingValue), Is.True); + Assert.That(redis.SetContainsItem(Set, Factory.NonExistingValue), Is.False); + } - [Test] - public void Can_IntersectBetweenSets() - { - var storeMembers = Factory.CreateList(); - var storeMembers2 = Factory.CreateList2(); + [Test] + public void Can_IntersectBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); - storeMembers.Add(storeMembers2.First()); - storeMembers2.Add(storeMembers.First()); + storeMembers.Add(storeMembers2.First()); + storeMembers2.Add(storeMembers.First()); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); - var intersectingMembers = redis.GetIntersectFromSets(Set, Set2); + var intersectingMembers = redis.GetIntersectFromSets(Set, Set2); - var intersect = Set.ToList().Intersect(Set2.ToList()).ToList(); + var intersect = Set.ToList().Intersect(Set2.ToList()).ToList(); - Assert.That(intersectingMembers, Is.EquivalentTo(intersect)); - } + Assert.That(intersectingMembers, Is.EquivalentTo(intersect)); + } - [Test] - public void Can_Store_IntersectBetweenSets() - { - var storeMembers = Factory.CreateList(); - var storeMembers2 = Factory.CreateList2(); + [Test] + public void Can_Store_IntersectBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); - redis.StoreIntersectFromSets(Set3, Set, Set2); + redis.StoreIntersectFromSets(Set3, Set, Set2); - var intersect= 
Set.ToList().Intersect(Set2).ToList(); + var intersect = Set.ToList().Intersect(Set2).ToList(); - Assert.That(Set3, Is.EquivalentTo(intersect)); - } + Assert.That(Set3, Is.EquivalentTo(intersect)); + } - [Test] - public void Can_UnionBetweenSets() - { - var storeMembers = Factory.CreateList(); - var storeMembers2 = Factory.CreateList2(); + [Test] + public void Can_UnionBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); - var unionMembers = redis.GetUnionFromSets(Set, Set2); + var unionMembers = redis.GetUnionFromSets(Set, Set2); - var union = Set.Union(Set2).ToList(); + var union = Set.Union(Set2).ToList(); - Assert.That(unionMembers, Is.EquivalentTo(union)); - } + Assert.That(unionMembers, Is.EquivalentTo(union)); + } - [Test] - public void Can_Store_UnionBetweenSets() - { - var storeMembers = Factory.CreateList(); - var storeMembers2 = Factory.CreateList2(); + [Test] + public void Can_Store_UnionBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); - redis.StoreUnionFromSets(Set3, Set, Set2); + redis.StoreUnionFromSets(Set3, Set, Set2); - var union= Set.ToList().Union(Set2).ToList(); + var union = Set.ToList().Union(Set2).ToList(); - Assert.That(Set3, Is.EquivalentTo(union)); - } + Assert.That(Set3, Is.EquivalentTo(union)); + } - [Test] - public void Can_DiffBetweenSets() - { - var storeMembers = Factory.CreateList(); - storeMembers.Add(Factory.CreateInstance(1)); + [Test] + public void Can_DiffBetweenSets() + { + var storeMembers = Factory.CreateList(); + storeMembers.Add(Factory.CreateInstance(1)); - var storeMembers2 = Factory.CreateList2(); - storeMembers2.Insert(0, Factory.CreateInstance(4)); + var storeMembers2 = Factory.CreateList2(); + storeMembers2.Insert(0, Factory.CreateInstance(4)); - var storeMembers3 = new List { - Factory.CreateInstance(1), - Factory.CreateInstance(5), - Factory.CreateInstance(7), - Factory.CreateInstance(11), - }; + var storeMembers3 = new List { + Factory.CreateInstance(1), + Factory.CreateInstance(5), + Factory.CreateInstance(7), + Factory.CreateInstance(11), + }; - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); - storeMembers3.ForEach(x => redis.AddItemToSet(Set3, x)); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); + storeMembers3.ForEach(x => redis.AddItemToSet(Set3, x)); - var diffMembers = redis.GetDifferencesFromSet(Set, Set2, Set3); + var diffMembers = redis.GetDifferencesFromSet(Set, Set2, Set3); - Assert.That(diffMembers, Is.EquivalentTo( - new List { Factory.CreateInstance(2), Factory.CreateInstance(3) })); - } + Assert.That(diffMembers, Is.EquivalentTo( + new List { Factory.CreateInstance(2), Factory.CreateInstance(3) })); + } - [Test] - public void Can_Store_DiffBetweenSets() - { - var storeMembers = Factory.CreateList(); - storeMembers.Add(Factory.CreateInstance(1)); + [Test] + public void Can_Store_DiffBetweenSets() + { + var storeMembers = 
Factory.CreateList(); + storeMembers.Add(Factory.CreateInstance(1)); - var storeMembers2 = Factory.CreateList2(); - storeMembers2.Insert(0, Factory.CreateInstance(4)); + var storeMembers2 = Factory.CreateList2(); + storeMembers2.Insert(0, Factory.CreateInstance(4)); - var storeMembers3 = new List { - Factory.CreateInstance(1), - Factory.CreateInstance(5), - Factory.CreateInstance(7), - Factory.CreateInstance(11), - }; + var storeMembers3 = new List { + Factory.CreateInstance(1), + Factory.CreateInstance(5), + Factory.CreateInstance(7), + Factory.CreateInstance(11), + }; - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); - storeMembers3.ForEach(x => redis.AddItemToSet(Set3, x)); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + storeMembers2.ForEach(x => redis.AddItemToSet(Set2, x)); + storeMembers3.ForEach(x => redis.AddItemToSet(Set3, x)); - var storeSet = redis.Sets["testdiffsetstore"]; + var storeSet = redis.Sets["testdiffsetstore"]; - redis.StoreDifferencesFromSet(storeSet, Set, Set2, Set3); + redis.StoreDifferencesFromSet(storeSet, Set, Set2, Set3); - Assert.That(storeSet, Is.EquivalentTo( - new List { Factory.CreateInstance(2), Factory.CreateInstance(3) })); + Assert.That(storeSet, Is.EquivalentTo( + new List { Factory.CreateInstance(2), Factory.CreateInstance(3) })); - } + } - [Test] - public void Can_GetRandomEntryFromSet() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + [Test] + public void Can_GetRandomEntryFromSet() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - var randomEntry = redis.GetRandomItemFromSet(Set); + var randomEntry = redis.GetRandomItemFromSet(Set); - Assert.That(storeMembers.Contains(randomEntry), Is.True); - } + Assert.That(storeMembers.Contains(randomEntry), Is.True); + } - [Test] - public void Can_enumerate_small_ICollection_Set() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + [Test] + public void Can_enumerate_small_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - var members = new List(); - foreach (var item in Set) - { - members.Add(item); - } + var members = new List(); + foreach (var item in Set) + { + members.Add(item); + } - Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } - [Test] - public void Can_enumerate_large_ICollection_Set() - { - if (TestConfig.IgnoreLongTests) return; + [Test] + public void Can_enumerate_large_ICollection_Set() + { + if (TestConfig.IgnoreLongTests) return; - const int setSize = 2500; + const int setSize = 2500; - var storeMembers = new List(); - setSize.Times(x => { - redis.AddItemToSet(Set, Factory.CreateInstance(x)); - storeMembers.Add(Factory.CreateInstance(x)); - }); + var storeMembers = new List(); + setSize.Times(x => + { + redis.AddItemToSet(Set, Factory.CreateInstance(x)); + storeMembers.Add(Factory.CreateInstance(x)); + }); - var members = new List(); - foreach (var item in Set) - { - members.Add(item); - } - members.Sort((x, y) => x.GetId().ToString().CompareTo(y.GetId().ToString())); - Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } + var 
members = new List(); + foreach (var item in Set) + { + members.Add(item); + } + members.Sort((x, y) => x.GetId().ToString().CompareTo(y.GetId().ToString())); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_Add_to_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - [Test] - public void Can_Add_to_ICollection_Set() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + var members = Set.ToList(); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } - var members = Set.ToList(); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } + [Test] + public void Can_Clear_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + + Assert.That(Set.Count, Is.EqualTo(storeMembers.Count)); - [Test] - public void Can_Clear_ICollection_Set() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + Set.Clear(); + + Assert.That(Set.Count, Is.EqualTo(0)); + } - Assert.That(Set.Count, Is.EqualTo(storeMembers.Count)); + [Test] + public void Can_Test_Contains_in_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); + + Assert.That(Set.Contains(Factory.ExistingValue), Is.True); + Assert.That(Set.Contains(Factory.NonExistingValue), Is.False); + } - Set.Clear(); + [Test] + public void Can_Remove_value_from_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - Assert.That(Set.Count, Is.EqualTo(0)); - } + storeMembers.Remove(Factory.ExistingValue); + Set.Remove(Factory.ExistingValue); - [Test] - public void Can_Test_Contains_in_ICollection_Set() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - - Assert.That(Set.Contains(Factory.ExistingValue), Is.True); - Assert.That(Set.Contains(Factory.NonExistingValue), Is.False); - } - - [Test] - public void Can_Remove_value_from_ICollection_Set() - { - var storeMembers = Factory.CreateList(); - storeMembers.ForEach(x => redis.AddItemToSet(Set, x)); - - storeMembers.Remove(Factory.ExistingValue); - Set.Remove(Factory.ExistingValue); - - var members = Set.ToList(); - - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - } + var members = Set.ToList(); + + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.Async.cs new file mode 100644 index 00000000..9eb33a51 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.Async.cs @@ -0,0 +1,81 @@ +using System; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Tests.Support; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisClientSetTestsModelWithFieldsOfDifferentTypesAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientSetTestsStringAsync + : RedisClientSetTestsBaseAsync + { + private 
readonly IModelFactory factory = new BuiltInsFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientSetTestsShipperAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new ShipperFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientSetTestsIntAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new IntFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientSetTestsCustomTypeAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + + //public class RedisClientSetTestsDateTimeAsync + // : RedisClientSetTestsBaseAsync + //{ + // private readonly IModelFactory factory = new DateTimeFactory(); + + // protected override IModelFactory Factory + // { + // get { return factory; } + // } + //} + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.cs index fd1a953c..015f7117 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.cs @@ -6,76 +6,76 @@ namespace ServiceStack.Redis.Tests.Generic { [TestFixture] - public class RedisClientSetTestsModelWithFieldsOfDifferentTypes - : RedisClientSetTestsBase - { - private readonly IModelFactory factory = - new ModelWithFieldsOfDifferentTypesFactory(); + public class RedisClientSetTestsModelWithFieldsOfDifferentTypes + : RedisClientSetTestsBase + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] public class RedisClientSetTestsString - : RedisClientSetTestsBase - { - private readonly IModelFactory factory = new BuiltInsFactory(); + : RedisClientSetTestsBase + { + private readonly IModelFactory factory = new BuiltInsFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] public class RedisClientSetTestsShipper - : RedisClientSetTestsBase - { - private readonly IModelFactory factory = new ShipperFactory(); + : RedisClientSetTestsBase + { + private readonly IModelFactory factory = new ShipperFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] public class RedisClientSetTestsInt - : RedisClientSetTestsBase - { - private readonly IModelFactory factory = new IntFactory(); + : RedisClientSetTestsBase + { + private readonly IModelFactory factory = new IntFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } [TestFixture] public class RedisClientSetTestsCustomType - : RedisClientSetTestsBase - { - private readonly IModelFactory factory = new CustomTypeFactory(); + : RedisClientSetTestsBase + { + private 
readonly IModelFactory factory = new CustomTypeFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } - //public class RedisClientSetTestsDateTime - // : RedisClientSetTestsBase - //{ - // private readonly IModelFactory factory = new DateTimeFactory(); + //public class RedisClientSetTestsDateTime + // : RedisClientSetTestsBase + //{ + // private readonly IModelFactory factory = new DateTimeFactory(); - // protected override IModelFactory Factory - // { - // get { return factory; } - // } - //} + // protected override IModelFactory Factory + // { + // get { return factory; } + // } + //} } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.Async.cs new file mode 100644 index 00000000..a553c572 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.Async.cs @@ -0,0 +1,129 @@ +using NUnit.Framework; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Integration"), Category("Async")] + public class RedisClientTestsAsync : RedisClientTestsBaseAsync + { + [OneTimeSetUp] + public void TestFixture() + { + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = "GenericRedisClientTests"; + } + + [Test] + public async Task Can_Set_and_Get_string() + { + const string value = "value"; + await RedisAsync.SetValueAsync("key", value); + var valueString = await RedisAsync.GetValueAsync("key"); + + Assert.That(valueString, Is.EqualTo(value)); + } + + [Test] + public async Task Can_Set_and_Get_key_with_all_byte_values() + { + const string key = "bytesKey"; + + var value = new byte[256]; + for (var i = 0; i < value.Length; i++) + { + value[i] = (byte)i; + } + + await NativeAsync.SetAsync(key, value); + var resultValue = await NativeAsync.GetAsync(key); + + Assert.That(resultValue, Is.EquivalentTo(value)); + } + + public List Sort(IEnumerable list) + { + var sortedList = list.ToList(); + sortedList.Sort((x, y) => + x.GetId().ToString().CompareTo(y.GetId().ToString())); + + return sortedList; + } + + [Test] + public async Task Can_SetBit_And_GetBit_And_BitCount() + { + const string key = "BitKey"; + const int offset = 100; + await NativeAsync.SetBitAsync(key, offset, 1); + Assert.AreEqual(1, await NativeAsync.GetBitAsync(key, offset)); + Assert.AreEqual(1, await NativeAsync.BitCountAsync(key)); + } + + public class Dummy + { + public int Id { get; set; } + public string Name { get; set; } + } + + [Test] + public async Task Can_Delete() + { + var dto = new Dummy { Id = 1, Name = "Name" }; + + await RedisAsync.StoreAsync(dto); + + Assert.That((await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + "ids:Dummy")).ToArray()[0], Is.EqualTo("1")); + Assert.That(await RedisAsync.GetByIdAsync(1), Is.Not.Null); + + await RedisAsync.DeleteAsync(dto); + + Assert.That((await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + "ids:Dummy")).Count, Is.EqualTo(0)); + Assert.That(await RedisAsync.GetByIdAsync(1), Is.Null); + } + + [Test] + public async Task Can_DeleteById() + { + var dto = new Dummy { Id = 1, Name = "Name" }; + await RedisAsync.StoreAsync(dto); + + Assert.That((await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + "ids:Dummy")).ToArray()[0], Is.EqualTo("1")); + 
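+            // StoreAsync registers the entity id in the "{NamespacePrefix}ids:Dummy" set,
+            // so DeleteByIdAsync below is expected both to empty that set and to make
+            // GetByIdAsync(1) return null, mirroring the synchronous Can_DeleteById test.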
Assert.That(await RedisAsync.GetByIdAsync(1), Is.Not.Null); + + await RedisAsync.DeleteByIdAsync(dto.Id); + + Assert.That((await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + "ids:Dummy")).Count, Is.EqualTo(0)); + Assert.That(await RedisAsync.GetByIdAsync(1), Is.Null); + } + + [Test] + public async Task Can_save_via_string() + { + var dtos = 10.Times(i => new Dummy { Id = i, Name = "Name" + i }); + + await RedisAsync.SetValueAsync("dummy:strings", dtos.ToJson()); + + var fromDtos = (await RedisAsync.GetValueAsync("dummy:strings")).FromJson>(); + + Assert.That(fromDtos.Count, Is.EqualTo(10)); + } + + [Test] + public async Task Can_save_via_types() + { + var dtos = 10.Times(i => new Dummy { Id = i, Name = "Name" + i }); + + await RedisAsync.SetAsync("dummy:strings", dtos); + + var fromDtos = await RedisAsync.GetAsync>("dummy:strings"); + + Assert.That(fromDtos.Count, Is.EqualTo(10)); + } + } +} diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.cs index 1b2a9072..57f1c238 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.cs @@ -1,24 +1,17 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics; +using System.Collections.Generic; using System.Linq; -using System.Threading; -using Northwind.Common.DataModel; using NUnit.Framework; -using ServiceStack.Common.Extensions; -using ServiceStack.Common.Utils; using ServiceStack.Text; namespace ServiceStack.Redis.Tests.Generic { - [TestFixture, Category("Integration")] + [TestFixture, Category("Integration")] public class RedisClientTests : RedisClientTestsBase - { - [TestFixtureSetUp] - public void TestFixture() - { - NorthwindData.LoadData(false); - } + { + [OneTimeSetUp] + public void TestFixture() + { + } public override void OnBeforeEachTest() { @@ -26,229 +19,74 @@ public override void OnBeforeEachTest() Redis.NamespacePrefix = "GenericRedisClientTests"; } - [Test] - public void Can_GetTypeIdsSet() - { - using (var typedClient = Redis.GetTypedClient()) - { - typedClient.StoreAll(NorthwindData.OrderDetails); - - Assert.That(typedClient.TypeIdsSet.Count, Is.EqualTo(NorthwindData.OrderDetails.Count)); - } - } - - [Test] - public void Can_Set_and_Get_string() - { - const string value = "value"; - Redis.SetEntry("key", value); - var valueString = Redis.GetValue("key"); - - Assert.That(valueString, Is.EqualTo(value)); - } - - [Test] - public void Can_Set_and_Get_key_with_all_byte_values() - { - const string key = "bytesKey"; - - var value = new byte[256]; - for (var i = 0; i < value.Length; i++) - { - value[i] = (byte) i; - } - - var redis = Redis.GetTypedClient(); - - redis.SetEntry(key, value); - var resultValue = redis.GetValue(key); - - Assert.That(resultValue, Is.EquivalentTo(value)); - } - - public List Sort(IEnumerable list) - { - var sortedList = list.ToList(); - sortedList.Sort((x, y) => - x.GetId().ToString().CompareTo(y.GetId().ToString())); - - return sortedList; - } - - public void AssertUnorderedListsAreEqual(IList actualList, IList expectedList) - { - Assert.That(actualList, Has.Count.EqualTo(expectedList.Count)); - - var actualMap = Sort(actualList.Select(x => x.GetId())); - var expectedMap = Sort(expectedList.Select(x => x.GetId())); - - Assert.That(actualMap, Is.EquivalentTo(expectedMap)); - } - - [Test] - public void Can_StoreAll_RedisClient() - { - var sp = Stopwatch.StartNew(); - Redis.StoreAll(NorthwindData.OrderDetails); - - var 
orderDetails = Redis.GetAll(); - AssertUnorderedListsAreEqual(orderDetails, NorthwindData.OrderDetails); - - Debug.WriteLine(String.Format("\nWrote {0:#,#} in {1:#,#}ms: {2:#,#.##}: items/ms", - NorthwindData.OrderDetails.Count, sp.ElapsedMilliseconds, - NorthwindData.OrderDetails.Count / (double)sp.ElapsedMilliseconds)); - } - - [Test] - public void Can_StoreAll_RedisTypedClient() - { - var sp = Stopwatch.StartNew(); - using (var typedClient = Redis.GetTypedClient()) - { - typedClient.StoreAll(NorthwindData.OrderDetails); - - var orderDetails = typedClient.GetAll(); - AssertUnorderedListsAreEqual(orderDetails, NorthwindData.OrderDetails); - } - Debug.WriteLine(String.Format("\nWrote {0:#,#} in {1:#,#}ms: {2:#,#.##}: items/ms", - NorthwindData.OrderDetails.Count, sp.ElapsedMilliseconds, - NorthwindData.OrderDetails.Count / (double)sp.ElapsedMilliseconds)); - } - [Test] - public void Can_SetBit_And_GetBit() + public void Can_Set_and_Get_string() { - const string key = "BitKey"; - const int offset = 100; - Redis.SetBit(key, offset, 1); - Assert.AreEqual(1, Redis.GetBit(key,offset)); - } + const string value = "value"; + Redis.SetValue("key", value); + var valueString = Redis.GetValue("key"); - [Test, Explicit] - public void Can_StoreAll_and_GetAll_from_Northwind() - { - var totalRecords - = NorthwindData.Categories.Count - + NorthwindData.Customers.Count - + NorthwindData.Employees.Count - + NorthwindData.Shippers.Count - + NorthwindData.Orders.Count - + NorthwindData.OrderDetails.Count - + NorthwindData.CustomerCustomerDemos.Count - + NorthwindData.Regions.Count - + NorthwindData.Territories.Count - + NorthwindData.EmployeeTerritories.Count; - - var before = DateTime.Now; - - Redis.StoreAll(NorthwindData.Categories); - Redis.StoreAll(NorthwindData.Customers); - Redis.StoreAll(NorthwindData.Employees); - Redis.StoreAll(NorthwindData.Shippers); - Redis.StoreAll(NorthwindData.Orders); - Redis.StoreAll(NorthwindData.Products); - Redis.StoreAll(NorthwindData.OrderDetails); - Redis.StoreAll(NorthwindData.CustomerCustomerDemos); - Redis.StoreAll(NorthwindData.Regions); - Redis.StoreAll(NorthwindData.Territories); - Redis.StoreAll(NorthwindData.EmployeeTerritories); - - Debug.WriteLine(String.Format("Took {0}ms to store the entire Northwind database ({1} records)", - (DateTime.Now - before).TotalMilliseconds, totalRecords)); - - - before = DateTime.Now; - - var categories = Redis.GetAll(); - var customers = Redis.GetAll(); - var employees = Redis.GetAll(); - var shippers = Redis.GetAll(); - var orders = Redis.GetAll(); - var products = Redis.GetAll(); - var orderDetails = Redis.GetAll(); - var customerCustomerDemos = Redis.GetAll(); - var regions = Redis.GetAll(); - var territories = Redis.GetAll(); - var employeeTerritories = Redis.GetAll(); - - Debug.WriteLine(String.Format("Took {0}ms to get the entire Northwind database ({1} records)", - (DateTime.Now - before).TotalMilliseconds, totalRecords)); - - - AssertUnorderedListsAreEqual(categories, NorthwindData.Categories); - AssertUnorderedListsAreEqual(customers, NorthwindData.Customers); - AssertUnorderedListsAreEqual(employees, NorthwindData.Employees); - AssertUnorderedListsAreEqual(shippers, NorthwindData.Shippers); - AssertUnorderedListsAreEqual(orders, NorthwindData.Orders); - AssertUnorderedListsAreEqual(products, NorthwindData.Products); - AssertUnorderedListsAreEqual(orderDetails, NorthwindData.OrderDetails); - AssertUnorderedListsAreEqual(customerCustomerDemos, NorthwindData.CustomerCustomerDemos); - AssertUnorderedListsAreEqual(regions, 
NorthwindData.Regions); - AssertUnorderedListsAreEqual(territories, NorthwindData.Territories); - AssertUnorderedListsAreEqual(employeeTerritories, NorthwindData.EmployeeTerritories); - } + Assert.That(valueString, Is.EqualTo(value)); + } [Test] - public void Can_Store_And_Get_Entities_As_Hashes() + public void Can_Set_and_Get_key_with_all_byte_values() { - var entity = NorthwindData.Customers[0]; - Redis.StoreAsHash(entity); - var fromDb = Redis.GetFromHash(entity.Id); + const string key = "bytesKey"; + + var value = new byte[256]; + for (var i = 0; i < value.Length; i++) + { + value[i] = (byte)i; + } + + var redis = Redis.As(); + + redis.SetValue(key, value); + var resultValue = redis.GetValue(key); - Assert.AreEqual(entity.Address, fromDb.Address); - Assert.AreEqual(entity.CompanyName,fromDb.CompanyName); - Assert.AreEqual(entity.Region,fromDb.Region); + Assert.That(resultValue, Is.EquivalentTo(value)); } - private class ComplexShipper : Shipper + public List Sort(IEnumerable list) { - public ComplexShipper() - { - SomeIds = new List(); - Addresses = new Dictionary(); - } - public IList SomeIds { get; set; } - public IDictionary Addresses { get; set; } + var sortedList = list.ToList(); + sortedList.Sort((x, y) => + x.GetId().ToString().CompareTo(y.GetId().ToString())); + + return sortedList; } - [Test, Ignore("Dictionary serialized differently")] - public void Can_Store_Complex_Entity_As_Hash() + public void AssertUnorderedListsAreEqual(IList actualList, IList expectedList) { - var entity = new ComplexShipper() - { - CompanyName = "Test Company", - Phone = "0123456789", - SomeIds = new List() { 123, 456, 789 }, - Addresses = - new Dictionary() - { - { "Home", "1 Some Street, some town" }, - { "Work", "2 Office Street, City" } - } - }; - - entity.Id = (int)(Redis.As().GetNextSequence()); - Redis.As().StoreAsHash(entity); - - var fromDb = Redis.As().GetFromHash(entity.Id); - Assert.AreEqual(entity.CompanyName, fromDb.CompanyName); - Assert.AreEqual(entity.Phone,fromDb.Phone); - Assert.AreEqual(entity.SomeIds, fromDb.SomeIds); - Assert.AreEqual(entity.Addresses, fromDb.Addresses); - var addressesSerialized = JsonSerializer.SerializeToString(entity.Addresses); - Assert.AreEqual(addressesSerialized, Redis.GetValueFromHash(entity.CreateUrn(), "Addresses")); + Assert.That(actualList, Has.Count.EqualTo(expectedList.Count)); + + var actualMap = Sort(actualList.Select(x => x.GetId())); + var expectedMap = Sort(expectedList.Select(x => x.GetId())); + + Assert.That(actualMap, Is.EquivalentTo(expectedMap)); + } + + [Test] + public void Can_SetBit_And_GetBit_And_BitCount() + { + const string key = "BitKey"; + const int offset = 100; + Redis.SetBit(key, offset, 1); + Assert.AreEqual(1, Redis.GetBit(key, offset)); + Assert.AreEqual(1, Redis.BitCount(key)); } - public class Dummy - { - public int Id { get; set; } - public string Name { get; set; } - } + public class Dummy + { + public int Id { get; set; } + public string Name { get; set; } + } - [Test] - public void Can_Delete() - { - var dto = new Dummy { Id = 1, Name = "Name" }; + [Test] + public void Can_Delete() + { + var dto = new Dummy { Id = 1, Name = "Name" }; Redis.Store(dto); @@ -259,12 +97,12 @@ public void Can_Delete() Assert.That(Redis.GetAllItemsFromSet(Redis.NamespacePrefix + "ids:Dummy").Count, Is.EqualTo(0)); Assert.That(Redis.GetById(1), Is.Null); - } + } - [Test] - public void Can_DeleteById() - { - var dto = new Dummy { Id = 1, Name = "Name" }; + [Test] + public void Can_DeleteById() + { + var dto = new Dummy { Id = 1, Name = "Name" 
}; Redis.Store(dto); Assert.That(Redis.GetAllItemsFromSet(Redis.NamespacePrefix + "ids:Dummy").ToArray()[0], Is.EqualTo("1")); @@ -274,7 +112,30 @@ public void Can_DeleteById() Assert.That(Redis.GetAllItemsFromSet(Redis.NamespacePrefix + "ids:Dummy").Count, Is.EqualTo(0)); Assert.That(Redis.GetById(1), Is.Null); - } + } - } + [Test] + public void Can_save_via_string() + { + var dtos = 10.Times(i => new Dummy { Id = i, Name = "Name" + i }); + + Redis.SetValue("dummy:strings", dtos.ToJson()); + + var fromDtos = Redis.GetValue("dummy:strings").FromJson>(); + + Assert.That(fromDtos.Count, Is.EqualTo(10)); + } + + [Test] + public void Can_save_via_types() + { + var dtos = 10.Times(i => new Dummy { Id = i, Name = "Name" + i }); + + Redis.Set("dummy:strings", dtos); + + var fromDtos = Redis.Get>("dummy:strings"); + + Assert.That(fromDtos.Count, Is.EqualTo(10)); + } + } } diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientTestsBase.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTestsBase.cs index c14075b6..3b0ea627 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientTestsBase.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTestsBase.cs @@ -2,15 +2,15 @@ namespace ServiceStack.Redis.Tests.Generic { - public interface IRedisClientTestsBase - { - void AssertListsAreEqual(List actualList, IList expectedList); - void AssertIsEqual(T actual, T expected); + public interface IRedisClientTestsBase + { + void AssertListsAreEqual(List actualList, IList expectedList); + void AssertIsEqual(T actual, T expected); - T ExistingValue { get; } - T NonExistingValue { get; } - List CreateList(); - List CreateList2(); - T CreateInstance(int i); - } + T ExistingValue { get; } + T NonExistingValue { get; } + List CreateList(); + List CreateList2(); + T CreateInstance(int i); + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.Async.cs new file mode 100644 index 00000000..92880411 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.Async.cs @@ -0,0 +1,97 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Async")] + public abstract class RedisPersistenceProviderTestsBaseAsync + { + protected abstract IModelFactory Factory { get; } + + private IRedisClientAsync client; + private IRedisTypedClientAsync redis; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + await client.FlushAllAsync(); + + redis = client.As(); + } + + [Test] + public async Task Can_Store_and_GetById_ModelWithIdAndName() + { + const int modelId = 1; + var to = Factory.CreateInstance(modelId); + await redis.StoreAsync(to); + + var from = await redis.GetByIdAsync(to.GetId().ToString()); + + Factory.AssertIsEqual(to, from); + } + + [Test] + public async Task Can_StoreAll_and_GetByIds_ModelWithIdAndName() + { + var tos = Factory.CreateList(); + var ids = tos.ConvertAll(x => x.GetId().ToString()); + + await redis.StoreAllAsync(tos); + + var froms = await redis.GetByIdsAsync(ids); + var fromIds = froms.Map(x => x.GetId().ToString()); + + Assert.That(fromIds, Is.EquivalentTo(ids)); + } + + [Test] + 
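+        // DeleteByIdsAsync below removes two of the stored ids (ids[1] and ids[3]); the test
+        // then recomputes the expected remainder from the original list and compares it with
+        // a fresh GetByIdsAsync read.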
public async Task Can_Delete_ModelWithIdAndName() + { + var tos = Factory.CreateList(); + var ids = tos.ConvertAll(x => x.GetId().ToString()); + + await redis.StoreAllAsync(tos); + + var deleteIds = new[] { ids[1], ids[3] }; + + await redis.DeleteByIdsAsync(deleteIds); + + var froms = await redis.GetByIdsAsync(ids); + var fromIds = froms.Map(x => x.GetId().ToString()); + + var expectedIds = ids.Where(x => !deleteIds.Contains(x)) + .ToList().ConvertAll(x => x.ToString()); + + Assert.That(fromIds, Is.EquivalentTo(expectedIds)); + } + + [Test] + public async Task Can_DeleteAll() + { + var tos = Factory.CreateList(); + await redis.StoreAllAsync(tos); + + var all = await redis.GetAllAsync(); + + Assert.That(all.Count, Is.EqualTo(tos.Count)); + + await redis.DeleteAllAsync(); + + all = await redis.GetAllAsync(); + + Assert.That(all.Count, Is.EqualTo(0)); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.cs index d903550a..3123334c 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.cs @@ -1,80 +1,78 @@ using System.Linq; using NUnit.Framework; -using ServiceStack.Common.Extensions; using ServiceStack.Common.Tests.Models; -using ServiceStack.Common.Utils; using ServiceStack.Redis.Generic; namespace ServiceStack.Redis.Tests.Generic { - [TestFixture] - public abstract class RedisPersistenceProviderTestsBase - { - protected abstract IModelFactory Factory { get; } + [TestFixture] + public abstract class RedisPersistenceProviderTestsBase + { + protected abstract IModelFactory Factory { get; } - private RedisClient client; - private IRedisTypedClient redis; + private RedisClient client; + private IRedisTypedClient redis; - [SetUp] - public void SetUp() - { - if (client != null) - { - client.Dispose(); - client = null; - } - client = new RedisClient(TestConfig.SingleHost); - client.FlushAll(); - - redis = client.GetTypedClient(); - } + [SetUp] + public void SetUp() + { + if (client != null) + { + client.Dispose(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + client.FlushAll(); + + redis = client.As(); + } - [Test] - public void Can_Store_and_GetById_ModelWithIdAndName() - { - const int modelId = 1; - var to = Factory.CreateInstance(modelId); - redis.Store(to); + [Test] + public void Can_Store_and_GetById_ModelWithIdAndName() + { + const int modelId = 1; + var to = Factory.CreateInstance(modelId); + redis.Store(to); - var from = redis.GetById(to.GetId().ToString()); + var from = redis.GetById(to.GetId().ToString()); - Factory.AssertIsEqual(to, from); - } + Factory.AssertIsEqual(to, from); + } - [Test] - public void Can_StoreAll_and_GetByIds_ModelWithIdAndName() - { - var tos = Factory.CreateList(); - var ids = tos.ConvertAll(x => x.GetId().ToString()); + [Test] + public void Can_StoreAll_and_GetByIds_ModelWithIdAndName() + { + var tos = Factory.CreateList(); + var ids = tos.ConvertAll(x => x.GetId().ToString()); - redis.StoreAll(tos); + redis.StoreAll(tos); - var froms = redis.GetByIds(ids); - var fromIds = froms.ConvertAll(x => x.GetId().ToString()); + var froms = redis.GetByIds(ids); + var fromIds = froms.Map(x => x.GetId().ToString()); - Assert.That(fromIds, Is.EquivalentTo(ids)); - } + Assert.That(fromIds, Is.EquivalentTo(ids)); + } - [Test] - public void Can_Delete_ModelWithIdAndName() - { - var tos = 
Factory.CreateList(); - var ids = tos.ConvertAll(x => x.GetId().ToString()); + [Test] + public void Can_Delete_ModelWithIdAndName() + { + var tos = Factory.CreateList(); + var ids = tos.ConvertAll(x => x.GetId().ToString()); - redis.StoreAll(tos); + redis.StoreAll(tos); - var deleteIds = new[] { ids[1], ids[3] }; + var deleteIds = new[] { ids[1], ids[3] }; - redis.DeleteByIds(deleteIds); + redis.DeleteByIds(deleteIds); - var froms = redis.GetByIds(ids); - var fromIds = froms.ConvertAll(x => x.GetId().ToString()); + var froms = redis.GetByIds(ids); + var fromIds = froms.Map(x => x.GetId().ToString()); - var expectedIds = ids.Where(x => !deleteIds.Contains(x)) - .ToList().ConvertAll(x => x.ToString()); + var expectedIds = ids.Where(x => !deleteIds.Contains(x)) + .ToList().ConvertAll(x => x.ToString()); - Assert.That(fromIds, Is.EquivalentTo(expectedIds)); - } + Assert.That(fromIds, Is.EquivalentTo(expectedIds)); + } [Test] public void Can_DeleteAll() @@ -93,5 +91,5 @@ public void Can_DeleteAll() Assert.That(all.Count, Is.EqualTo(0)); } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.Async.cs new file mode 100644 index 00000000..3f7c00cf --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.Async.cs @@ -0,0 +1,40 @@ +using System; +using ServiceStack.Common.Tests.Models; + +namespace ServiceStack.Redis.Tests.Generic +{ + public class RedisPersistenceProviderTestsModelWithFieldsOfDifferentTypesAsync + : RedisPersistenceProviderTestsBaseAsync + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + public class RedisPersistenceProviderTestsStringFactoryAsync + : RedisPersistenceProviderTestsBaseAsync + { + private readonly IModelFactory factory = new BuiltInsFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + public class RedisPersistenceProviderTestsShipperAsync + : RedisPersistenceProviderTestsBaseAsync + { + private readonly IModelFactory factory = new ShipperFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.cs index 46bd48de..7084f7e0 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.cs @@ -3,38 +3,38 @@ namespace ServiceStack.Redis.Tests.Generic { - public class RedisPersistenceProviderTestsModelWithFieldsOfDifferentTypes - : RedisPersistenceProviderTestsBase - { - private readonly IModelFactory factory = - new ModelWithFieldsOfDifferentTypesFactory(); + public class RedisPersistenceProviderTestsModelWithFieldsOfDifferentTypes + : RedisPersistenceProviderTestsBase + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } - public class RedisPersistenceProviderTestsStringFactory - : RedisPersistenceProviderTestsBase - { - private readonly IModelFactory 
factory = new BuiltInsFactory(); + public class RedisPersistenceProviderTestsStringFactory + : RedisPersistenceProviderTestsBase + { + private readonly IModelFactory factory = new BuiltInsFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } - public class RedisPersistenceProviderTestsShipper - : RedisPersistenceProviderTestsBase - { - private readonly IModelFactory factory = new ShipperFactory(); + public class RedisPersistenceProviderTestsShipper + : RedisPersistenceProviderTestsBase + { + private readonly IModelFactory factory = new ShipperFactory(); - protected override IModelFactory Factory - { - get { return factory; } - } - } + protected override IModelFactory Factory + { + get { return factory; } + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientAppTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientAppTests.cs index 45f3d0c6..89618a1e 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientAppTests.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientAppTests.cs @@ -1,199 +1,261 @@ using System.Collections.Generic; using NUnit.Framework; -using ServiceStack.Common.Extensions; using ServiceStack.Redis.Generic; namespace ServiceStack.Redis.Tests.Generic { - public class Question - { - public long Id { get; set; } - public string UserId { get; set; } - public string Title { get; set; } - public string Content { get; set; } - - public static Question Create(long id) - { - return new Question - { - Id = id, - Content = "Content" + id, - Title = "Title" + id, - UserId = "User" + id, - }; - } - } - - public class Answer - { - public long Id { get; set; } - public long QuestionId { get; set; } - public string UserId { get; set; } - public string Content { get; set; } - - public static Answer Create(long id, long questionId) - { - return new Answer - { - Id = id, - QuestionId = questionId, - UserId = "User" + id, - Content = "Content" + id, - }; - } - - public bool Equals(Answer other) - { - if (ReferenceEquals(null, other)) return false; - if (ReferenceEquals(this, other)) return true; - return other.Id == Id && other.QuestionId == QuestionId && Equals(other.UserId, UserId) && Equals(other.Content, Content); - } - - public override bool Equals(object obj) - { - if (ReferenceEquals(null, obj)) return false; - if (ReferenceEquals(this, obj)) return true; - if (obj.GetType() != typeof(Answer)) return false; - return Equals((Answer)obj); - } - - public override int GetHashCode() - { - return Id.GetHashCode(); - } - } - - [TestFixture] - public class RedisTypedClientAppTests - : RedisClientTestsBase - { - private IRedisTypedClient redisQuestions; - readonly Question question1 = Question.Create(1); - List q1Answers; - - public override void OnBeforeEachTest() - { - base.OnBeforeEachTest(); - - redisQuestions = base.Redis.GetTypedClient(); - redisQuestions.Db = 10; - redisQuestions.FlushDb(); - - q1Answers = new List - { - Answer.Create(1, question1.Id), - Answer.Create(2, question1.Id), - Answer.Create(3, question1.Id), - Answer.Create(4, question1.Id), - Answer.Create(5, question1.Id), - }; - } - - [Test] - public void Can_StoreRelatedEntities() - { - redisQuestions.Store(question1); - - redisQuestions.StoreRelatedEntities(question1.Id, q1Answers); - - var actualAnswers = redisQuestions.GetRelatedEntities(question1.Id); - actualAnswers.Sort((x, y) => x.Id.CompareTo(y.Id)); - - 
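For orientation while reading the StoreRelatedEntities/GetRelatedEntities assertions in this file's diff: the typed client stores the child entities and associates their ids with the parent id, which is what the later delete tests rely on. A minimal usage sketch of that API, assuming an already-connected IRedisClient named redis and the Question/Answer types defined in this file:

    var redisQuestions = redis.As<Question>();
    redisQuestions.Store(Question.Create(1));
    redisQuestions.StoreRelatedEntities(1, new List<Answer> { Answer.Create(1, 1), Answer.Create(2, 1) });

    List<Answer> answers = redisQuestions.GetRelatedEntities<Answer>(1); // both children
    redisQuestions.DeleteRelatedEntity<Answer>(1, 2);                    // remove a single child
    redisQuestions.DeleteRelatedEntities<Answer>(1);                     // remove all children of the parent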
Assert.That(actualAnswers.EquivalentTo(q1Answers)); - } - - [Test] - public void Can_GetRelatedEntities_When_Empty() - { - redisQuestions.Store(question1); - - var answers = redisQuestions.GetRelatedEntities(question1.Id); - - Assert.That(answers, Has.Count.EqualTo(0)); - } - - [Test] - public void Can_DeleteRelatedEntity() - { - redisQuestions.Store(question1); - - redisQuestions.StoreRelatedEntities(question1.Id, q1Answers); - - var answerToDelete = q1Answers[3]; - redisQuestions.DeleteRelatedEntity(question1.Id, answerToDelete.Id); - - q1Answers.RemoveAll(x => x.Id == answerToDelete.Id); - - var answers = redisQuestions.GetRelatedEntities(question1.Id); - - Assert.That(answers.EquivalentTo(answers)); - } - - [Test] - public void Can_DeleteRelatedEntities() - { - redisQuestions.Store(question1); + public class Question + { + public long Id { get; set; } + public string UserId { get; set; } + public string Title { get; set; } + public string Content { get; set; } + + public static Question Create(long id) + { + return new Question + { + Id = id, + Content = "Content" + id, + Title = "Title" + id, + UserId = "User" + id, + }; + } + } + + public class Answer + { + public long Id { get; set; } + public long QuestionId { get; set; } + public string UserId { get; set; } + public string Content { get; set; } + + public static Answer Create(long id, long questionId) + { + return new Answer + { + Id = id, + QuestionId = questionId, + UserId = "User" + id, + Content = "Content" + id, + }; + } + + public bool Equals(Answer other) + { + if (ReferenceEquals(null, other)) return false; + if (ReferenceEquals(this, other)) return true; + return other.Id == Id && other.QuestionId == QuestionId && Equals(other.UserId, UserId) && Equals(other.Content, Content); + } + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != typeof(Answer)) return false; + return Equals((Answer)obj); + } + + public override int GetHashCode() + { + return Id.GetHashCode(); + } + } + + [TestFixture] + public class RedisTypedClientAppTests + : RedisClientTestsBase + { + private IRedisTypedClient redisQuestions; + readonly Question question1 = Question.Create(1); + List q1Answers; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + redisQuestions = base.Redis.As(); + redisQuestions.Db = 10; + redisQuestions.FlushDb(); + + q1Answers = new List + { + Answer.Create(1, question1.Id), + Answer.Create(2, question1.Id), + Answer.Create(3, question1.Id), + Answer.Create(4, question1.Id), + Answer.Create(5, question1.Id), + }; + } + + [Test] + public void Can_StoreRelatedEntities() + { + redisQuestions.Store(question1); + + redisQuestions.StoreRelatedEntities(question1.Id, q1Answers); + + var actualAnswers = redisQuestions.GetRelatedEntities(question1.Id); + actualAnswers.Sort((x, y) => x.Id.CompareTo(y.Id)); + + Assert.That(actualAnswers.EquivalentTo(q1Answers)); + } + + public class Customer + { + public string Id { get; set; } + public string Name { get; set; } + } + + public class CustomerAddress + { + public string Id { get; set; } + public string Address { get; set; } + } + + [Test] + public void Can_StoreRelatedEntities_with_StringId() + { + var redisCustomers = Redis.As(); + var customer = new Customer { Id = "CUST-01", Name = "Customer" }; + + redisCustomers.Store(customer); + + var addresses = new[] + { + new CustomerAddress { Id = "ADDR-01", Address = "1 Home Street" }, + new CustomerAddress { 
Id = "ADDR-02", Address = "2 Work Road" }, + }; + + redisCustomers.StoreRelatedEntities(customer.Id, addresses); + + var actualAddresses = redisCustomers.GetRelatedEntities(customer.Id); + + Assert.That(actualAddresses.Map(x => x.Id), + Is.EquivalentTo(new[] { "ADDR-01", "ADDR-02" })); + } + + [Test] + public void Can_GetRelatedEntities_When_Empty() + { + redisQuestions.Store(question1); - redisQuestions.StoreRelatedEntities(question1.Id, q1Answers); + var answers = redisQuestions.GetRelatedEntities(question1.Id); - redisQuestions.DeleteRelatedEntities(question1.Id); + Assert.That(answers, Has.Count.EqualTo(0)); + } - var answers = redisQuestions.GetRelatedEntities(question1.Id); + [Test] + public void Can_DeleteRelatedEntity() + { + redisQuestions.Store(question1); - Assert.That(answers.Count, Is.EqualTo(0)); - } + redisQuestions.StoreRelatedEntities(question1.Id, q1Answers); - [Test] - public void Can_AddToRecentsList() - { - var redisAnswers = Redis.GetTypedClient(); + var answerToDelete = q1Answers[3]; + redisQuestions.DeleteRelatedEntity(question1.Id, answerToDelete.Id); - redisAnswers.StoreAll(q1Answers); - q1Answers.ForEach(redisAnswers.AddToRecentsList); + q1Answers.RemoveAll(x => x.Id == answerToDelete.Id); - var allAnswers = redisAnswers.GetLatestFromRecentsList(0, int.MaxValue); - allAnswers.Sort((x, y) => x.Id.CompareTo(y.Id)); + var answers = redisQuestions.GetRelatedEntities(question1.Id); - Assert.That(allAnswers.EquivalentTo(q1Answers)); - } + Assert.That(answers.EquivalentTo(answers)); + } - [Test] - public void Can_GetLatestFromRecentsList() - { - var redisAnswers = Redis.GetTypedClient(); + [Test] + public void Can_DeleteRelatedEntities() + { + redisQuestions.Store(question1); - redisAnswers.StoreAll(q1Answers); - q1Answers.ForEach(redisAnswers.AddToRecentsList); + redisQuestions.StoreRelatedEntities(question1.Id, q1Answers); - var latest3Answers = redisAnswers.GetLatestFromRecentsList(0, 3); + redisQuestions.DeleteRelatedEntities(question1.Id); - var i = q1Answers.Count; - var expectedAnswers = new List - { - q1Answers[--i], q1Answers[--i], q1Answers[--i], - }; + var answers = redisQuestions.GetRelatedEntities(question1.Id); - Assert.That(expectedAnswers.EquivalentTo(latest3Answers)); - } + Assert.That(answers.Count, Is.EqualTo(0)); + } - [Test] - public void Can_GetEarliestFromRecentsList() - { - var redisAnswers = Redis.GetTypedClient(); + [Test] + public void Can_AddToRecentsList() + { + var redisAnswers = Redis.As(); - redisAnswers.StoreAll(q1Answers); - q1Answers.ForEach(redisAnswers.AddToRecentsList); + redisAnswers.StoreAll(q1Answers); + q1Answers.ForEach(redisAnswers.AddToRecentsList); - var earliest3Answers = redisAnswers.GetEarliestFromRecentsList(0, 3); + var allAnswers = redisAnswers.GetLatestFromRecentsList(0, int.MaxValue); + allAnswers.Sort((x, y) => x.Id.CompareTo(y.Id)); - var i = 0; - var expectedAnswers = new List - { - q1Answers[i++], q1Answers[i++], q1Answers[i++], - }; + Assert.That(allAnswers.EquivalentTo(q1Answers)); + } - Assert.That(expectedAnswers.EquivalentTo(earliest3Answers)); - } + [Test] + public void Can_GetLatestFromRecentsList() + { + var redisAnswers = Redis.As(); - } + redisAnswers.StoreAll(q1Answers); + q1Answers.ForEach(redisAnswers.AddToRecentsList); + + var latest3Answers = redisAnswers.GetLatestFromRecentsList(0, 3); + + var i = q1Answers.Count; + var expectedAnswers = new List + { + q1Answers[--i], q1Answers[--i], q1Answers[--i], + }; + + Assert.That(expectedAnswers.EquivalentTo(latest3Answers)); + } + + [Test] + public void 
Can_GetEarliestFromRecentsList() + { + var redisAnswers = Redis.As(); + + redisAnswers.StoreAll(q1Answers); + q1Answers.ForEach(redisAnswers.AddToRecentsList); + + var earliest3Answers = redisAnswers.GetEarliestFromRecentsList(0, 3); + + var i = 0; + var expectedAnswers = new List + { + q1Answers[i++], q1Answers[i++], q1Answers[i++], + }; + + Assert.That(expectedAnswers.EquivalentTo(earliest3Answers)); + } + + [Test] + public void Can_save_quoted_strings() + { + var str = "string \"with\" \"quotes\""; + var cacheKey = "quotetest"; + + Redis.As().SetValue(cacheKey, str); + var fromRedis = Redis.As().GetValue(cacheKey); + Assert.That(fromRedis, Is.EqualTo(str)); + + Redis.Set(cacheKey, str); + fromRedis = Redis.Get(cacheKey); + Assert.That(fromRedis, Is.EqualTo(str)); + + Redis.SetValue(cacheKey, str); + fromRedis = Redis.GetValue(cacheKey); + Assert.That(fromRedis, Is.EqualTo(str)); + + Redis.SetValue(cacheKey, str.ToJson()); + fromRedis = Redis.GetValue(cacheKey).FromJson(); + Assert.That(fromRedis, Is.EqualTo(str)); + } + + [Test] + public void Does_return_non_existent_keys_as_defaultValue() + { + Assert.That(Redis.Get("notexists"), Is.Null); + Assert.That(Redis.Get("notexists"), Is.EqualTo(0)); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.Async.cs new file mode 100644 index 00000000..7224d6c5 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.Async.cs @@ -0,0 +1,167 @@ +using NUnit.Framework; +using ServiceStack.Redis.Generic; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Integration")] + public class RedisTypedClientTestsAsync + : RedisClientTestsBaseAsync + { + public class CacheRecord + { + public CacheRecord() + { + this.Children = new List(); + } + + public string Id { get; set; } + public List Children { get; set; } + } + + public class CacheRecordChild + { + public string Id { get; set; } + public string Data { get; set; } + } + + protected IRedisTypedClientAsync RedisTyped; + + [SetUp] + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + RedisRaw?.Dispose(); + RedisRaw = new RedisClient(TestConfig.SingleHost) + { + NamespacePrefix = "RedisTypedClientTests:" + }; + RedisTyped = RedisAsync.As(); + } + + [TearDown] + public virtual async Task TearDown() + { + foreach (var t in await RedisAsync.SearchKeysAsync(RedisRaw.NamespacePrefix + "*")) + { + await NativeAsync.DelAsync(t); + } + } + + [Test] + public async Task Can_Store_with_Prefix() + { + var expected = new CacheRecord() { Id = "123" }; + await RedisTyped.StoreAsync(expected); + var current = await RedisAsync.GetAsync("RedisTypedClientTests:urn:cacherecord:123"); + Assert.AreEqual(expected.Id, current.Id); + } + + [Test] + public async Task Can_Expire() + { + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + await RedisTyped.StoreAsync(cachedRecord); + await RedisTyped.ExpireInAsync("key", TimeSpan.FromSeconds(1)); + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Not.Null); + await Task.Delay(2000); + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Null); + } + + [Ignore("Changes in system clock can break test")] + [Test] + public async Task Can_ExpireAt() + { + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + 
new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + await RedisTyped.StoreAsync(cachedRecord); + + var in2Secs = DateTime.UtcNow.AddSeconds(2); + + await RedisTyped.ExpireAtAsync("key", in2Secs); + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Not.Null); + await Task.Delay(3000); + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Null); + } + + [Test] + public async Task Can_Delete_All_Items() + { + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + await RedisTyped.StoreAsync(cachedRecord); + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Not.Null); + + await RedisTyped.DeleteAllAsync(); + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Null); + + } + + [Test] + public async Task Can_Delete_All_Items_multiple_batches() + { + // Clear previous usage + await RedisAsync.DeleteAsync(RedisRaw.GetTypeIdsSetKey()); + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + var exists = RedisRaw.Exists(RedisRaw.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, Is.EqualTo(0)); + + await RedisTyped.StoreAsync(cachedRecord); + + exists = RedisRaw.Exists(RedisRaw.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, Is.EqualTo(1)); + + RedisConfig.CommandKeysBatchSize = 5; + + for (int i = 0; i < 50; i++) + { + cachedRecord.Id = "key" + i; + await RedisTyped.StoreAsync(cachedRecord); + } + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Not.Null); + + await RedisTyped.DeleteAllAsync(); + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Null); + + exists = RedisRaw.Exists(RedisRaw.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, Is.EqualTo(0)); + + RedisConfig.Reset(); + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.cs index 0ebca270..18236cf2 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.cs @@ -7,42 +7,41 @@ namespace ServiceStack.Redis.Tests.Generic { - [TestFixture, Category("Integration")] - public class RedisTypedClientTests - { - public class CacheRecord - { - public CacheRecord() - { - this.Children = new List(); - } - - public string Id { get; set; } - public List Children { get; set; } - } - - public class CacheRecordChild - { - public string Id { get; set; } - public string Data { get; set; } - } - - protected RedisClient Redis; - protected IRedisTypedClient RedisTyped; - - protected void Log(string fmt, params object[] args) - { - Debug.WriteLine("{0}", string.Format(fmt, args).Trim()); - } - - [SetUp] - public virtual void OnBeforeEachTest() - { - if (Redis != null) Redis.Dispose(); - Redis = new RedisClient(TestConfig.SingleHost); - Redis.NamespacePrefix = "RedisTypedClientTests:"; - RedisTyped = Redis.As(); - } + [TestFixture, Category("Integration")] + public class RedisTypedClientTests + : RedisClientTestsBase + { + public class CacheRecord + { + public CacheRecord() + { + this.Children = new List(); + } + + public string Id { get; set; } + public List Children { get; set; } + } + + public class CacheRecordChild + { + public string Id { get; set; } + public string Data { get; set; } + } + + protected IRedisTypedClient RedisTyped; + + [SetUp] + public override void OnBeforeEachTest() + { + 
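This rewritten fixture builds its client with a NamespacePrefix, so every key the typed client writes is isolated under one prefix and can be cleaned up by pattern. A minimal sketch of the same pattern outside the fixture, assuming a locally running Redis (the host string is illustrative):

    using (var redis = new RedisClient("localhost:6379") { NamespacePrefix = "RedisTypedClientTests:" })
    {
        var typed = redis.As<CacheRecord>();
        typed.Store(new CacheRecord { Id = "123" });

        // Stored under the prefixed urn key, which Can_Store_with_Prefix asserts directly:
        var fetched = redis.Get<CacheRecord>("RedisTypedClientTests:urn:cacherecord:123");
    }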
base.OnBeforeEachTest(); + + Redis?.Dispose(); + Redis = new RedisClient(TestConfig.SingleHost) + { + NamespacePrefix = "RedisTypedClientTests:" + }; + RedisTyped = Redis.As(); + } [TearDown] public virtual void TearDown() @@ -53,51 +52,113 @@ public virtual void TearDown() [Test] public void Can_Store_with_Prefix() { - var expected = new CacheRecord() {Id = "123"}; + var expected = new CacheRecord() { Id = "123" }; RedisTyped.Store(expected); var current = Redis.Get("RedisTypedClientTests:urn:cacherecord:123"); Assert.AreEqual(expected.Id, current.Id); } - [Test] - public void Can_Expire() - { - var cachedRecord = new CacheRecord - { - Id = "key", - Children = { - new CacheRecordChild { Id = "childKey", Data = "data" } - } - }; - - RedisTyped.Store(cachedRecord); - RedisTyped.ExpireIn("key", TimeSpan.FromSeconds(1)); - Assert.That(RedisTyped.GetById("key"), Is.Not.Null); - Thread.Sleep(2000); - Assert.That(RedisTyped.GetById("key"), Is.Null); - } - - [Test] - public void Can_ExpireAt() - { - var cachedRecord = new CacheRecord - { - Id = "key", - Children = { - new CacheRecordChild { Id = "childKey", Data = "data" } - } - }; - - RedisTyped.Store(cachedRecord); - - var in1Sec = DateTime.Now.AddSeconds(1); - - RedisTyped.ExpireAt("key", in1Sec); - - Assert.That(RedisTyped.GetById("key"), Is.Not.Null); - Thread.Sleep(2000); - Assert.That(RedisTyped.GetById("key"), Is.Null); - } - } + [Test] + public void Can_Expire() + { + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + RedisTyped.Store(cachedRecord); + RedisTyped.ExpireIn("key", TimeSpan.FromSeconds(1)); + Assert.That(RedisTyped.GetById("key"), Is.Not.Null); + Thread.Sleep(2000); + Assert.That(RedisTyped.GetById("key"), Is.Null); + } + + [Ignore("Changes in system clock can break test")] + [Test] + public void Can_ExpireAt() + { + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + RedisTyped.Store(cachedRecord); + + var in2Secs = DateTime.UtcNow.AddSeconds(2); + + RedisTyped.ExpireAt("key", in2Secs); + + Assert.That(RedisTyped.GetById("key"), Is.Not.Null); + Thread.Sleep(3000); + Assert.That(RedisTyped.GetById("key"), Is.Null); + } + + [Test] + public void Can_Delete_All_Items() + { + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + RedisTyped.Store(cachedRecord); + + Assert.That(RedisTyped.GetById("key"), Is.Not.Null); + + RedisTyped.DeleteAll(); + + Assert.That(RedisTyped.GetById("key"), Is.Null); + } + + [Test] + public void Can_Delete_All_Items_multiple_batches() + { + // Clear previous usage + Redis.Delete(Redis.GetTypeIdsSetKey(typeof(CacheRecord))); + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + var exists = Redis.Exists(Redis.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, Is.EqualTo(0)); + + RedisTyped.Store(cachedRecord); + + exists = Redis.Exists(Redis.GetTypeIdsSetKey(typeof(CacheRecord))); + + Assert.That(exists, Is.EqualTo(1)); + + RedisConfig.CommandKeysBatchSize = 5; + + for (int i = 0; i < 50; i++) + { + cachedRecord.Id = "key" + i; + RedisTyped.Store(cachedRecord); + } + + Assert.That(RedisTyped.GetById("key"), Is.Not.Null); + + RedisTyped.DeleteAll(); + + exists = Redis.Exists(Redis.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, 
Is.EqualTo(0)); + Assert.That(RedisTyped.GetById("key"), Is.Null); + + RedisConfig.Reset(); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.Async.cs new file mode 100644 index 00000000..663fdf97 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.Async.cs @@ -0,0 +1,230 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisTypedPipelineTestsAsync + : RedisClientTestsBaseAsync + { + public RedisTypedPipelineTestsAsync() + { + CleanMask = "gmultitest*"; + } + + private const string Key = "gmultitest"; + private const string ListKey = "gmultitest-list"; + private const string SetKey = "gmultitest-set"; + private const string SortedSetKey = "gmultitest-sortedset"; + + readonly ShipperFactory modelFactory = new ShipperFactory(); + private IRedisTypedClientAsync typedClient; + private Shipper model; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + typedClient = RedisAsync.As(); + model = modelFactory.CreateInstance(1); + } + + + [Test] + public async Task Can_call_single_operation_in_pipeline() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.SetValueAsync(Key, model)); + + await pipeline.FlushAsync(); + } + + modelFactory.AssertIsEqual(await typedClient.GetValueAsync(Key), model); + } + + [Test] + public async Task No_commit_of_atomic_pipelines_discards_all_commands() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.SetValueAsync(Key, model)); + } + + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + } + + [Test] + public async Task Exception_in_atomic_pipelines_discards_all_commands() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + try + { + await using var pipeline = typedClient.CreatePipeline(); + pipeline.QueueCommand(r => r.SetValueAsync(Key, model)); + throw new NotSupportedException(); + } + catch (NotSupportedException) + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Can_call_single_operation_3_Times_in_pipeline() + { + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3))); + + await pipeline.FlushAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + } + + [Test] + public async Task Can_call_single_operation_with_callback_3_Times_in_pipeline() + { + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1)), () => 
results.Add(1)); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2)), () => results.Add(2)); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3)), () => results.Add(3)); + + await pipeline.FlushAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public async Task Supports_different_operation_types_in_same_pipeline() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + var typedList = typedClient.Lists[ListKey]; + var typedSet = typedClient.Sets[SetKey]; + var typedSortedSet = typedClient.SortedSets[SortedSetKey]; + + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + pipeline.QueueCommand(r => r.AddItemToSetAsync(typedSet, modelFactory.CreateInstance(3))); + pipeline.QueueCommand(r => r.SetContainsItemAsync(typedSet, modelFactory.CreateInstance(3)), b => containsItem = b); + pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(4))); + pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(5))); + pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(6))); + pipeline.QueueCommand(r => r.GetListCountAsync(typedList), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSetCountAsync(typedSet), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSortedSetCountAsync(typedSortedSet), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + + await pipeline.FlushAsync(); + } + + Assert.That(containsItem, Is.True); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + + modelFactory.AssertListsAreEqual(await typedList.GetAllAsync(), new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2) + }); + + Assert.That(await typedSet.GetAllAsync(), Is.EquivalentTo(new List + { + modelFactory.CreateInstance(3) + })); + + modelFactory.AssertListsAreEqual(await typedSortedSet.GetAllAsync(), new List + { + modelFactory.CreateInstance(4), modelFactory.CreateInstance(5), modelFactory.CreateInstance(6) + }); + } + + [Test] + public async Task Can_call_multi_string_operations_in_pipeline() + { + Shipper item1 = null; + Shipper item4 = null; + + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3))); + pipeline.QueueCommand(r => 
r.GetAllItemsFromListAsync(typedList), x => results = x); + pipeline.QueueCommand(r => r.GetItemFromListAsync(typedList, 0), x => item1 = x); + pipeline.QueueCommand(r => r.GetItemFromListAsync(typedList, 4), x => item4 = x); + + await pipeline.FlushAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + + modelFactory.AssertListsAreEqual(results, new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2), modelFactory.CreateInstance(3) + }); + + modelFactory.AssertIsEqual(item1, modelFactory.CreateInstance(1)); + Assert.That(item4, Is.Null); + } + [Test] + public async Task Pipeline_can_be_replayed() + { + const string keySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(keySquared), Is.Null); + await using var pipeline = typedClient.CreatePipeline(); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(keySquared)); + await pipeline.FlushAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(keySquared), Is.EqualTo("1")); + await typedClient.RemoveEntryAsync(Key); + await typedClient.RemoveEntryAsync(keySquared); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(keySquared), Is.Null); + + await pipeline.ReplayAsync(); + await pipeline.DisposeAsync(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(keySquared), Is.EqualTo("1")); + + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.cs index 36bff068..00d08630 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.cs @@ -6,202 +6,202 @@ namespace ServiceStack.Redis.Tests.Generic { - [TestFixture] - public class RedisTypedPipelineTests - : RedisClientTestsBase - { + [TestFixture] + public class RedisTypedPipelineTests + : RedisClientTestsBase + { public RedisTypedPipelineTests() { CleanMask = "gmultitest*"; } - private const string Key = "gmultitest"; - private const string ListKey = "gmultitest-list"; - private const string SetKey = "gmultitest-set"; - private const string SortedSetKey = "gmultitest-sortedset"; - - readonly ShipperFactory modelFactory = new ShipperFactory(); - private IRedisTypedClient typedClient; - private Shipper model; - - public override void OnBeforeEachTest() - { - base.OnBeforeEachTest(); - - typedClient = Redis.GetTypedClient(); - model = modelFactory.CreateInstance(1); - } - - - [Test] - public void Can_call_single_operation_in_pipeline() - { - Assert.That(typedClient.GetValue(Key), Is.Null); - - using (var pipeline = typedClient.CreatePipeline()) - { - pipeline.QueueCommand(r => r.SetEntry(Key, model)); - - pipeline.Flush(); - } - - modelFactory.AssertIsEqual(typedClient.GetValue(Key), model); - } - - [Test] - public void No_commit_of_atomic_pipelines_discards_all_commands() - { - Assert.That(typedClient.GetValue(Key), Is.Null); - - using (var pipeline = typedClient.CreatePipeline()) - { - pipeline.QueueCommand(r => r.SetEntry(Key, model)); - } - - Assert.That(typedClient.GetValue(Key), Is.Null); - } - - [Test] - public void Exception_in_atomic_pipelines_discards_all_commands() - { - Assert.That(typedClient.GetValue(Key), Is.Null); 
- try - { - using (var pipeline = typedClient.CreatePipeline()) - { - pipeline.QueueCommand(r => r.SetEntry(Key, model)); - throw new NotSupportedException(); - } - } - catch (NotSupportedException ignore) - { - Assert.That(typedClient.GetValue(Key), Is.Null); - } - } - - [Test] - public void Can_call_single_operation_3_Times_in_pipeline() - { - var typedList = typedClient.Lists[ListKey]; - Assert.That(typedList.Count, Is.EqualTo(0)); - - using (var pipeline = typedClient.CreatePipeline()) - { - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3))); - - pipeline.Flush(); - } - - Assert.That(typedList.Count, Is.EqualTo(3)); - } - - [Test] - public void Can_call_single_operation_with_callback_3_Times_in_pipeline() - { - var results = new List(); - - var typedList = typedClient.Lists[ListKey]; - Assert.That(typedList.Count, Is.EqualTo(0)); - - using (var pipeline = typedClient.CreatePipeline()) - { - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1)), () => results.Add(1)); - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2)), () => results.Add(2)); - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3)), () => results.Add(3)); - - pipeline.Flush(); - } - - Assert.That(typedList.Count, Is.EqualTo(3)); - Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); - } - - [Test] - public void Supports_different_operation_types_in_same_pipeline() - { - var incrementResults = new List(); - var collectionCounts = new List(); - var containsItem = false; - - var typedList = typedClient.Lists[ListKey]; - var typedSet = typedClient.Sets[SetKey]; - var typedSortedSet = typedClient.SortedSets[SortedSetKey]; - - Assert.That(typedClient.GetValue(Key), Is.Null); - using (var pipeline = typedClient.CreatePipeline()) - { - pipeline.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); - pipeline.QueueCommand(r => r.AddItemToSet(typedSet, modelFactory.CreateInstance(3))); - pipeline.QueueCommand(r => r.SetContainsItem(typedSet, modelFactory.CreateInstance(3)), b => containsItem = b); - pipeline.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(4))); - pipeline.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(5))); - pipeline.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(6))); - pipeline.QueueCommand(r => r.GetListCount(typedList), intResult => collectionCounts.Add(intResult)); - pipeline.QueueCommand(r => r.GetSetCount(typedSet), intResult => collectionCounts.Add(intResult)); - pipeline.QueueCommand(r => r.GetSortedSetCount(typedSortedSet), intResult => collectionCounts.Add(intResult)); - pipeline.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); - - pipeline.Flush(); - } - - Assert.That(containsItem, Is.True); - Assert.That(Redis.GetValue(Key), Is.EqualTo("2")); - Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); - Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); - - 
modelFactory.AssertListsAreEqual(typedList.GetAll(), new List - { - modelFactory.CreateInstance(1), modelFactory.CreateInstance(2) - }); - - Assert.That(typedSet.GetAll(), Is.EquivalentTo(new List - { - modelFactory.CreateInstance(3) - })); - - modelFactory.AssertListsAreEqual(typedSortedSet.GetAll(), new List - { - modelFactory.CreateInstance(4), modelFactory.CreateInstance(5), modelFactory.CreateInstance(6) - }); - } - - [Test] - public void Can_call_multi_string_operations_in_pipeline() - { - Shipper item1 = null; - Shipper item4 = null; - - var results = new List(); - - var typedList = typedClient.Lists[ListKey]; - Assert.That(typedList.Count, Is.EqualTo(0)); - - using (var pipeline = typedClient.CreatePipeline()) - { - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); - pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3))); - pipeline.QueueCommand(r => r.GetAllItemsFromList(typedList), x => results = x); - pipeline.QueueCommand(r => r.GetItemFromList(typedList, 0), x => item1 = x); - pipeline.QueueCommand(r => r.GetItemFromList(typedList, 4), x => item4 = x); - - pipeline.Flush(); - } - - Assert.That(typedList.Count, Is.EqualTo(3)); - - modelFactory.AssertListsAreEqual(results, new List - { - modelFactory.CreateInstance(1), modelFactory.CreateInstance(2), modelFactory.CreateInstance(3) - }); - - modelFactory.AssertIsEqual(item1, modelFactory.CreateInstance(1)); - Assert.That(item4, Is.Null); - } + private const string Key = "gmultitest"; + private const string ListKey = "gmultitest-list"; + private const string SetKey = "gmultitest-set"; + private const string SortedSetKey = "gmultitest-sortedset"; + + readonly ShipperFactory modelFactory = new ShipperFactory(); + private IRedisTypedClient typedClient; + private Shipper model; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + typedClient = Redis.As(); + model = modelFactory.CreateInstance(1); + } + + + [Test] + public void Can_call_single_operation_in_pipeline() + { + Assert.That(typedClient.GetValue(Key), Is.Null); + + using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.SetValue(Key, model)); + + pipeline.Flush(); + } + + modelFactory.AssertIsEqual(typedClient.GetValue(Key), model); + } + + [Test] + public void No_commit_of_atomic_pipelines_discards_all_commands() + { + Assert.That(typedClient.GetValue(Key), Is.Null); + + using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.SetValue(Key, model)); + } + + Assert.That(typedClient.GetValue(Key), Is.Null); + } + + [Test] + public void Exception_in_atomic_pipelines_discards_all_commands() + { + Assert.That(typedClient.GetValue(Key), Is.Null); + try + { + using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.SetValue(Key, model)); + throw new NotSupportedException(); + } + } + catch (NotSupportedException) + { + Assert.That(typedClient.GetValue(Key), Is.Null); + } + } + + [Test] + public void Can_call_single_operation_3_Times_in_pipeline() + { + var typedList = typedClient.Lists[ListKey]; + Assert.That(typedList.Count, Is.EqualTo(0)); + + using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); + pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); + pipeline.QueueCommand(r => 
r.AddItemToList(typedList, modelFactory.CreateInstance(3))); + + pipeline.Flush(); + } + + Assert.That(typedList.Count, Is.EqualTo(3)); + } + + [Test] + public void Can_call_single_operation_with_callback_3_Times_in_pipeline() + { + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(typedList.Count, Is.EqualTo(0)); + + using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1)), () => results.Add(1)); + pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2)), () => results.Add(2)); + pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3)), () => results.Add(3)); + + pipeline.Flush(); + } + + Assert.That(typedList.Count, Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public void Supports_different_operation_types_in_same_pipeline() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + var typedList = typedClient.Lists[ListKey]; + var typedSet = typedClient.Sets[SetKey]; + var typedSortedSet = typedClient.SortedSets[SortedSetKey]; + + Assert.That(typedClient.GetValue(Key), Is.Null); + using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); + pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); + pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); + pipeline.QueueCommand(r => r.AddItemToSet(typedSet, modelFactory.CreateInstance(3))); + pipeline.QueueCommand(r => r.SetContainsItem(typedSet, modelFactory.CreateInstance(3)), b => containsItem = b); + pipeline.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(4))); + pipeline.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(5))); + pipeline.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(6))); + pipeline.QueueCommand(r => r.GetListCount(typedList), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSetCount(typedSet), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSortedSetCount(typedSortedSet), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); + + pipeline.Flush(); + } + + Assert.That(containsItem, Is.True); + Assert.That(Redis.GetValue(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + + modelFactory.AssertListsAreEqual(typedList.GetAll(), new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2) + }); + + Assert.That(typedSet.GetAll(), Is.EquivalentTo(new List + { + modelFactory.CreateInstance(3) + })); + + modelFactory.AssertListsAreEqual(typedSortedSet.GetAll(), new List + { + modelFactory.CreateInstance(4), modelFactory.CreateInstance(5), modelFactory.CreateInstance(6) + }); + } + + [Test] + public void Can_call_multi_string_operations_in_pipeline() + { + Shipper item1 = null; + Shipper item4 = null; + + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(typedList.Count, Is.EqualTo(0)); + + using (var pipeline = typedClient.CreatePipeline()) + { + 
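Every pipeline test in this hunk follows the same contract: QueueCommand only buffers work on the client (optionally registering a callback for the reply), Flush() sends the whole batch in one round trip and then runs the callbacks, and disposing without Flush() (or throwing before it) discards everything, which is what the no-commit and exception tests assert. A condensed sketch of that shape, reusing this fixture's typedClient, Key and model:

    Shipper fromServer = null;
    using (var pipeline = typedClient.CreatePipeline())
    {
        pipeline.QueueCommand(r => r.SetValue(Key, model));               // write, result ignored
        pipeline.QueueCommand(r => r.GetValue(Key), x => fromServer = x); // read, delivered via callback

        pipeline.Flush(); // nothing reaches the server until this point
    }
    // fromServer is populated only after Flush() has run the callbacks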
pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); + pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); + pipeline.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3))); + pipeline.QueueCommand(r => r.GetAllItemsFromList(typedList), x => results = x); + pipeline.QueueCommand(r => r.GetItemFromList(typedList, 0), x => item1 = x); + pipeline.QueueCommand(r => r.GetItemFromList(typedList, 4), x => item4 = x); + + pipeline.Flush(); + } + + Assert.That(typedList.Count, Is.EqualTo(3)); + + modelFactory.AssertListsAreEqual(results, new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2), modelFactory.CreateInstance(3) + }); + + modelFactory.AssertIsEqual(item1, modelFactory.CreateInstance(1)); + Assert.That(item4, Is.Null); + } [Test] public void Pipeline_can_be_replayed() { @@ -229,5 +229,5 @@ public void Pipeline_can_be_replayed() } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.Async.cs new file mode 100644 index 00000000..9d096db7 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.Async.cs @@ -0,0 +1,240 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisTypedTransactionTestsAsync + : RedisClientTestsBaseAsync + { + private const string Key = "multitest"; + private const string ListKey = "multitest-list"; + private const string SetKey = "multitest-set"; + private const string SortedSetKey = "multitest-sortedset"; + + readonly ShipperFactory modelFactory = new ShipperFactory(); + private IRedisTypedClientAsync typedClient; + private Shipper model; + + public RedisTypedTransactionTestsAsync() + { + CleanMask = "multitest*"; + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + typedClient = RedisAsync.As(); + model = modelFactory.CreateInstance(1); + } + + [Test] + public async Task Can_call_single_operation_in_transaction() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.SetValueAsync(Key, model)); + + await trans.CommitAsync(); + } + + modelFactory.AssertIsEqual(await typedClient.GetValueAsync(Key), model); + } + + [Test] + public async Task No_commit_of_atomic_transactions_discards_all_commands() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.SetValueAsync(Key, model)); + } + + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + } + + [Test] + public async Task Exception_in_atomic_transactions_discards_all_commands() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + try + { + await using var trans = await typedClient.CreateTransactionAsync(); + trans.QueueCommand(r => r.SetValueAsync(Key, model)); + throw new NotSupportedException(); + } + catch (NotSupportedException) + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Can_call_single_operation_3_Times_in_transaction() + { + var typedList = typedClient.Lists[ListKey]; + 
Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3))); + + await trans.CommitAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + } + + [Test] + public async Task Can_call_single_operation_with_callback_3_Times_in_transaction() + { + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1)), () => results.Add(1)); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2)), () => results.Add(2)); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3)), () => results.Add(3)); + + await trans.CommitAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public async Task Supports_different_operation_types_in_same_transaction() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + var typedList = typedClient.Lists[ListKey]; + var typedSet = typedClient.Sets[SetKey]; + var typedSortedSet = typedClient.SortedSets[SortedSetKey]; + + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + trans.QueueCommand(r => r.AddItemToSetAsync(typedSet, modelFactory.CreateInstance(3))); + trans.QueueCommand(r => r.SetContainsItemAsync(typedSet, modelFactory.CreateInstance(3)), b => containsItem = b); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(4))); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(5))); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(6))); + trans.QueueCommand(r => r.GetListCountAsync(typedList), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSetCountAsync(typedSet), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSortedSetCountAsync(typedSortedSet), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + + await trans.CommitAsync(); + } + + Assert.That(containsItem, Is.True); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + + modelFactory.AssertListsAreEqual(await typedList.GetAllAsync(), new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2) + }); + + Assert.That(await typedSet.GetAllAsync(), Is.EquivalentTo(new List + { + 
modelFactory.CreateInstance(3) + })); + + modelFactory.AssertListsAreEqual(await typedSortedSet.GetAllAsync(), new List + { + modelFactory.CreateInstance(4), modelFactory.CreateInstance(5), modelFactory.CreateInstance(6) + }); + } + + [Test] + public async Task Can_call_multi_string_operations_in_transaction() + { + Shipper item1 = null; + Shipper item4 = null; + + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3))); + trans.QueueCommand(r => r.GetAllItemsFromListAsync(typedList), x => results = x); + trans.QueueCommand(r => r.GetItemFromListAsync(typedList, 0), x => item1 = x); + trans.QueueCommand(r => r.GetItemFromListAsync(typedList, 4), x => item4 = x); + + await trans.CommitAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + + modelFactory.AssertListsAreEqual(results, new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2), modelFactory.CreateInstance(3) + }); + + modelFactory.AssertIsEqual(item1, modelFactory.CreateInstance(1)); + Assert.That(item4, Is.Null); + } + [Test] + // Operations that are not supported in older versions will look at server info to determine what to do. + // If server info is fetched each time, then it will interfer with transaction + public async Task Can_call_operation_not_supported_on_older_servers_in_transaction() + { + var temp = new byte[1]; + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync("key", 5, temp)); + await trans.CommitAsync(); + } + + + [Test] + public async Task Transaction_can_be_replayed() + { + string KeySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + trans.QueueCommand(r => r.IncrementValueAsync(KeySquared)); + await trans.CommitAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + await NativeAsync.DelAsync(Key); + await NativeAsync.DelAsync(KeySquared); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + + await trans.ReplayAsync(); + await trans.DisposeAsync(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.cs index 2dcee2b0..02df210c 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.cs @@ -6,201 +6,201 @@ namespace ServiceStack.Redis.Tests.Generic { - [TestFixture] - public class RedisTypedTransactionTests - : RedisClientTestsBase - { - private const string Key = "multitest"; - private const 
string ListKey = "multitest-list"; - private const string SetKey = "multitest-set"; - private const string SortedSetKey = "multitest-sortedset"; - - readonly ShipperFactory modelFactory = new ShipperFactory(); - private IRedisTypedClient typedClient; - private Shipper model; + [TestFixture] + public class RedisTypedTransactionTests + : RedisClientTestsBase + { + private const string Key = "multitest"; + private const string ListKey = "multitest-list"; + private const string SetKey = "multitest-set"; + private const string SortedSetKey = "multitest-sortedset"; + + readonly ShipperFactory modelFactory = new ShipperFactory(); + private IRedisTypedClient typedClient; + private Shipper model; public RedisTypedTransactionTests() { CleanMask = "multitest*"; } - public override void OnBeforeEachTest() - { - base.OnBeforeEachTest(); - - typedClient = Redis.GetTypedClient(); - model = modelFactory.CreateInstance(1); - } - - [Test] - public void Can_call_single_operation_in_transaction() - { - Assert.That(typedClient.GetValue(Key), Is.Null); - - using (var trans = typedClient.CreateTransaction()) - { - trans.QueueCommand(r => r.SetEntry(Key, model)); - - trans.Commit(); - } - - modelFactory.AssertIsEqual(typedClient.GetValue(Key), model); - } - - [Test] - public void No_commit_of_atomic_transactions_discards_all_commands() - { - Assert.That(typedClient.GetValue(Key), Is.Null); - - using (var trans = typedClient.CreateTransaction()) - { - trans.QueueCommand(r => r.SetEntry(Key, model)); - } - - Assert.That(typedClient.GetValue(Key), Is.Null); - } - - [Test] - public void Exception_in_atomic_transactions_discards_all_commands() - { - Assert.That(typedClient.GetValue(Key), Is.Null); - try - { - using (var trans = typedClient.CreateTransaction()) - { - trans.QueueCommand(r => r.SetEntry(Key, model)); - throw new NotSupportedException(); - } - } - catch (NotSupportedException ignore) - { - Assert.That(typedClient.GetValue(Key), Is.Null); - } - } - - [Test] - public void Can_call_single_operation_3_Times_in_transaction() - { - var typedList = typedClient.Lists[ListKey]; - Assert.That(typedList.Count, Is.EqualTo(0)); - - using (var trans = typedClient.CreateTransaction()) - { - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3))); - - trans.Commit(); - } - - Assert.That(typedList.Count, Is.EqualTo(3)); - } - - [Test] - public void Can_call_single_operation_with_callback_3_Times_in_transaction() - { - var results = new List(); - - var typedList = typedClient.Lists[ListKey]; - Assert.That(typedList.Count, Is.EqualTo(0)); - - using (var trans = typedClient.CreateTransaction()) - { - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1)), () => results.Add(1)); - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2)), () => results.Add(2)); - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3)), () => results.Add(3)); - - trans.Commit(); - } - - Assert.That(typedList.Count, Is.EqualTo(3)); - Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); - } - - [Test] - public void Supports_different_operation_types_in_same_transaction() - { - var incrementResults = new List(); - var collectionCounts = new List(); - var containsItem = false; - - var typedList = typedClient.Lists[ListKey]; - var typedSet = 
typedClient.Sets[SetKey]; - var typedSortedSet = typedClient.SortedSets[SortedSetKey]; - - Assert.That(typedClient.GetValue(Key), Is.Null); - using (var trans = typedClient.CreateTransaction()) - { - trans.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); - trans.QueueCommand(r => r.AddItemToSet(typedSet, modelFactory.CreateInstance(3))); - trans.QueueCommand(r => r.SetContainsItem(typedSet, modelFactory.CreateInstance(3)), b => containsItem = b); - trans.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(4))); - trans.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(5))); - trans.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(6))); - trans.QueueCommand(r => r.GetListCount(typedList), intResult => collectionCounts.Add(intResult)); - trans.QueueCommand(r => r.GetSetCount(typedSet), intResult => collectionCounts.Add(intResult)); - trans.QueueCommand(r => r.GetSortedSetCount(typedSortedSet), intResult => collectionCounts.Add(intResult)); - trans.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); - - trans.Commit(); - } - - Assert.That(containsItem, Is.True); - Assert.That(Redis.GetValue(Key), Is.EqualTo("2")); - Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); - Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); - - modelFactory.AssertListsAreEqual(typedList.GetAll(), new List - { - modelFactory.CreateInstance(1), modelFactory.CreateInstance(2) - }); - - Assert.That(typedSet.GetAll(), Is.EquivalentTo(new List - { - modelFactory.CreateInstance(3) - })); - - modelFactory.AssertListsAreEqual(typedSortedSet.GetAll(), new List - { - modelFactory.CreateInstance(4), modelFactory.CreateInstance(5), modelFactory.CreateInstance(6) - }); - } - - [Test] - public void Can_call_multi_string_operations_in_transaction() - { - Shipper item1 = null; - Shipper item4 = null; - - var results = new List(); - - var typedList = typedClient.Lists[ListKey]; - Assert.That(typedList.Count, Is.EqualTo(0)); - - using (var trans = typedClient.CreateTransaction()) - { - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); - trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3))); - trans.QueueCommand(r => r.GetAllItemsFromList(typedList), x => results = x); - trans.QueueCommand(r => r.GetItemFromList(typedList, 0), x => item1 = x); - trans.QueueCommand(r => r.GetItemFromList(typedList, 4), x => item4 = x); - - trans.Commit(); - } - - Assert.That(typedList.Count, Is.EqualTo(3)); - - modelFactory.AssertListsAreEqual(results, new List - { - modelFactory.CreateInstance(1), modelFactory.CreateInstance(2), modelFactory.CreateInstance(3) - }); - - modelFactory.AssertIsEqual(item1, modelFactory.CreateInstance(1)); - Assert.That(item4, Is.Null); - } + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + typedClient = Redis.As(); + model = modelFactory.CreateInstance(1); + } + + [Test] + public void Can_call_single_operation_in_transaction() + { + Assert.That(typedClient.GetValue(Key), Is.Null); + + using (var trans = typedClient.CreateTransaction()) + { + trans.QueueCommand(r => 
r.SetValue(Key, model)); + + trans.Commit(); + } + + modelFactory.AssertIsEqual(typedClient.GetValue(Key), model); + } + + [Test] + public void No_commit_of_atomic_transactions_discards_all_commands() + { + Assert.That(typedClient.GetValue(Key), Is.Null); + + using (var trans = typedClient.CreateTransaction()) + { + trans.QueueCommand(r => r.SetValue(Key, model)); + } + + Assert.That(typedClient.GetValue(Key), Is.Null); + } + + [Test] + public void Exception_in_atomic_transactions_discards_all_commands() + { + Assert.That(typedClient.GetValue(Key), Is.Null); + try + { + using (var trans = typedClient.CreateTransaction()) + { + trans.QueueCommand(r => r.SetValue(Key, model)); + throw new NotSupportedException(); + } + } + catch (NotSupportedException) + { + Assert.That(typedClient.GetValue(Key), Is.Null); + } + } + + [Test] + public void Can_call_single_operation_3_Times_in_transaction() + { + var typedList = typedClient.Lists[ListKey]; + Assert.That(typedList.Count, Is.EqualTo(0)); + + using (var trans = typedClient.CreateTransaction()) + { + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3))); + + trans.Commit(); + } + + Assert.That(typedList.Count, Is.EqualTo(3)); + } + + [Test] + public void Can_call_single_operation_with_callback_3_Times_in_transaction() + { + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(typedList.Count, Is.EqualTo(0)); + + using (var trans = typedClient.CreateTransaction()) + { + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1)), () => results.Add(1)); + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2)), () => results.Add(2)); + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3)), () => results.Add(3)); + + trans.Commit(); + } + + Assert.That(typedList.Count, Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public void Supports_different_operation_types_in_same_transaction() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + var typedList = typedClient.Lists[ListKey]; + var typedSet = typedClient.Sets[SetKey]; + var typedSortedSet = typedClient.SortedSets[SortedSetKey]; + + Assert.That(typedClient.GetValue(Key), Is.Null); + using (var trans = typedClient.CreateTransaction()) + { + trans.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); + trans.QueueCommand(r => r.AddItemToSet(typedSet, modelFactory.CreateInstance(3))); + trans.QueueCommand(r => r.SetContainsItem(typedSet, modelFactory.CreateInstance(3)), b => containsItem = b); + trans.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(4))); + trans.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(5))); + trans.QueueCommand(r => r.AddItemToSortedSet(typedSortedSet, modelFactory.CreateInstance(6))); + trans.QueueCommand(r => r.GetListCount(typedList), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSetCount(typedSet), intResult => collectionCounts.Add(intResult)); + 
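// Commands queued here are presumably only buffered client-side; nothing is sent to Redis
// until Commit() wraps the whole batch in MULTI/EXEC. Each QueueCommand callback (e.g.
// intResult => collectionCounts.Add(intResult)) is invoked with its matching EXEC reply,
// which is why incrementResults and collectionCounts are only asserted after Commit() below.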
trans.QueueCommand(r => r.GetSortedSetCount(typedSortedSet), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); + + trans.Commit(); + } + + Assert.That(containsItem, Is.True); + Assert.That(Redis.GetValue(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + + modelFactory.AssertListsAreEqual(typedList.GetAll(), new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2) + }); + + Assert.That(typedSet.GetAll(), Is.EquivalentTo(new List + { + modelFactory.CreateInstance(3) + })); + + modelFactory.AssertListsAreEqual(typedSortedSet.GetAll(), new List + { + modelFactory.CreateInstance(4), modelFactory.CreateInstance(5), modelFactory.CreateInstance(6) + }); + } + + [Test] + public void Can_call_multi_string_operations_in_transaction() + { + Shipper item1 = null; + Shipper item4 = null; + + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(typedList.Count, Is.EqualTo(0)); + + using (var trans = typedClient.CreateTransaction()) + { + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(1))); + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(2))); + trans.QueueCommand(r => r.AddItemToList(typedList, modelFactory.CreateInstance(3))); + trans.QueueCommand(r => r.GetAllItemsFromList(typedList), x => results = x); + trans.QueueCommand(r => r.GetItemFromList(typedList, 0), x => item1 = x); + trans.QueueCommand(r => r.GetItemFromList(typedList, 4), x => item4 = x); + + trans.Commit(); + } + + Assert.That(typedList.Count, Is.EqualTo(3)); + + modelFactory.AssertListsAreEqual(results, new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2), modelFactory.CreateInstance(3) + }); + + modelFactory.AssertIsEqual(item1, modelFactory.CreateInstance(1)); + Assert.That(item4, Is.Null); + } [Test] // Operations that are not supported in older versions will look at server info to determine what to do. 
// If server info is fetched each time, then it will interfer with transaction @@ -240,6 +240,6 @@ public void Transaction_can_be_replayed() Assert.That(Redis.GetValue(KeySquared), Is.EqualTo("1")); } } - - } + + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Integration/IntegrationTestBase.cs b/tests/ServiceStack.Redis.Tests/Integration/IntegrationTestBase.cs index 988ec9c4..9283c309 100644 --- a/tests/ServiceStack.Redis.Tests/Integration/IntegrationTestBase.cs +++ b/tests/ServiceStack.Redis.Tests/Integration/IntegrationTestBase.cs @@ -4,85 +4,109 @@ using System.Threading; using NUnit.Framework; using ServiceStack.Text; +#if NETCORE +using System.Threading.Tasks; +#endif namespace ServiceStack.Redis.Tests.Integration { [Category("Integration")] - public class IntegrationTestBase - { - protected IRedisClientsManager CreateAndStartPoolManager( - string[] readWriteHosts, string[] readOnlyHosts) - { - return new PooledRedisClientManager(readWriteHosts, readOnlyHosts); - } + public class IntegrationTestBase + { + protected IRedisClientsManager CreateAndStartPoolManager( + string[] readWriteHosts, string[] readOnlyHosts) + { + return new PooledRedisClientManager(readWriteHosts, readOnlyHosts); + } - protected IRedisClientsManager CreateAndStartBasicCacheManager( - string[] readWriteHosts, string[] readOnlyHosts) - { - return new BasicRedisClientManager(readWriteHosts, readOnlyHosts); - } + protected IRedisClientsManager CreateAndStartManagerPool( + string[] readWriteHosts, string[] readOnlyHosts) + { + return new RedisManagerPool(readWriteHosts, new RedisPoolConfig + { + MaxPoolSize = 10 + }); + } - protected IRedisClientsManager CreateAndStartBasicManager( - string[] readWriteHosts, string[] readOnlyHosts) - { - return new BasicRedisClientManager(readWriteHosts, readOnlyHosts); - } + protected IRedisClientsManager CreateAndStartBasicCacheManager( + string[] readWriteHosts, string[] readOnlyHosts) + { + return new BasicRedisClientManager(readWriteHosts, readOnlyHosts); + } - [Conditional("DEBUG")] - protected static void Log(string fmt, params object[] args) - { - Debug.WriteLine(String.Format(fmt, args)); - } + protected IRedisClientsManager CreateAndStartBasicManager( + string[] readWriteHosts, string[] readOnlyHosts) + { + return new BasicRedisClientManager(readWriteHosts, readOnlyHosts); + } - protected void RunSimultaneously( - Func clientManagerFactory, - Action useClientFn) - { - var before = Stopwatch.GetTimestamp(); + [Conditional("DEBUG")] + protected static void Log(string fmt, params object[] args) + { + Debug.WriteLine(String.Format(fmt, args)); + } - const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + protected void RunSimultaneously( + Func clientManagerFactory, + Action useClientFn) + { + var before = Stopwatch.GetTimestamp(); - var clientAsyncResults = new List(); - using (var manager = clientManagerFactory(TestConfig.MasterHosts, TestConfig.SlaveHosts)) - { - for (var i = 0; i < noOfConcurrentClients; i++) - { - var clientNo = i; - var action = (Action)(() => useClientFn(manager, clientNo)); - clientAsyncResults.Add(action.BeginInvoke(null, null)); - } - } + const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 - WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); +#if NETCORE + List tasks = new List(); +#else + var clientAsyncResults = new List(); +#endif + using (var manager = clientManagerFactory(TestConfig.MasterHosts, TestConfig.ReplicaHosts)) + { + for (var i = 0; i 
< noOfConcurrentClients; i++) + { + var clientNo = i; + var action = (Action)(() => useClientFn(manager, clientNo)); +#if NETCORE + tasks.Add(Task.Run(action)); +#else + clientAsyncResults.Add(action.BeginInvoke(null, null)); +#endif + } + } - Debug.WriteLine(String.Format("Time Taken: {0}", (Stopwatch.GetTimestamp() - before) / 1000)); - } +#if NETCORE + Task.WaitAll(tasks.ToArray()); +#else + WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); +#endif - protected static void CheckHostCountMap(Dictionary hostCountMap) - { - Debug.WriteLine(TypeSerializer.SerializeToString(hostCountMap)); + Debug.WriteLine($"Time Taken: {(Stopwatch.GetTimestamp() - before) / 1000}"); + } - if (TestConfig.SlaveHosts.Length <= 1) return; + protected static void CheckHostCountMap(Dictionary hostCountMap) + { + Debug.WriteLine(TypeSerializer.SerializeToString(hostCountMap)); - var hostCount = 0; - foreach (var entry in hostCountMap) - { - if (entry.Value < 5) - { - Debug.WriteLine("ERROR: Host has unproportianate distrobution: " + entry.Value); - } - if (entry.Value > 60) - { - Debug.WriteLine("ERROR: Host has unproportianate distrobution: " + entry.Value); - } - hostCount += entry.Value; - } + if (TestConfig.ReplicaHosts.Length <= 1) return; - if (hostCount != 64) - { - Debug.WriteLine("ERROR: Invalid no of clients used"); - } - } + var hostCount = 0; + foreach (var entry in hostCountMap) + { + if (entry.Value < 5) + { + Debug.WriteLine("ERROR: Host has unproportionate distribution: " + entry.Value); + } + if (entry.Value > 60) + { + Debug.WriteLine("ERROR: Host has unproportionate distribution: " + entry.Value); + } + hostCount += entry.Value; + } - } + if (hostCount != 64) + { + Debug.WriteLine("ERROR: Invalid no of clients used"); + } + } + + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedCacheClientManagerIntegrationTests.cs b/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedCacheClientManagerIntegrationTests.cs deleted file mode 100644 index 28feb1c7..00000000 --- a/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedCacheClientManagerIntegrationTests.cs +++ /dev/null @@ -1,67 +0,0 @@ -using System; -using Northwind.Common.DataModel; -using NUnit.Framework; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Tests.Integration -{ - [TestFixture] - public class MultiThreadedCacheClientManagerIntegrationTests - : IntegrationTestBase - { - private static string testData; - - [TestFixtureSetUp] - public void onBeforeTestFixture() - { - NorthwindData.LoadData(false); - - testData = TypeSerializer.SerializeToString(NorthwindData.Customers); - } - - [Test] - public void Pool_can_support_64_threads_using_the_client_simultaneously() - { - RunSimultaneously(CreateAndStartPoolManager, UseClient); - } - - [Test] - public void Basic_can_support_64_threads_using_the_client_simultaneously() - { - RunSimultaneously(CreateAndStartBasicCacheManager, UseClient); - } - - private static void UseClient(IRedisClientsManager manager, int clientNo) - { - var cacheManager = (IRedisClientCacheManager)manager; - - var host = ""; - - try - { - using (var client = cacheManager.GetReadOnlyCacheClient()) - { - host = ((IRedisClient)client).Host; - Log("Client '{0}' is using '{1}'", clientNo, host); - - var testClientKey = "test:" + host + ":" + clientNo; - client.Set(testClientKey, testData); - var result = client.Get(testClientKey) ?? 
""; - - Log("\t{0} => {1} len {2} {3} len", testClientKey, - testData.Length, testData.Length == result.Length ? "==" : "!=", result.Length); - } - } - catch (NullReferenceException ex) - { - Log("NullReferenceException StackTrace: \n" + ex.StackTrace); - } - catch (Exception ex) - { - Log("\t[ERROR@{0}]: {1} => {2}", - host, ex.GetType().Name, ex.Message); - } - } - - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedPoolIntegrationTests.cs b/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedPoolIntegrationTests.cs index 4f9de4f6..6aa6e751 100644 --- a/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedPoolIntegrationTests.cs +++ b/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedPoolIntegrationTests.cs @@ -3,65 +3,72 @@ namespace ServiceStack.Redis.Tests.Integration { - [TestFixture] - public class MultiThreadedPoolIntegrationTests - : IntegrationTestBase - { - static Dictionary hostCountMap; + [TestFixture] + public class MultiThreadedPoolIntegrationTests + : IntegrationTestBase + { + static Dictionary hostCountMap; - public IRedisClientsManager CreateAndStartManager( - string[] readWriteHosts, string[] readOnlyHosts) - { - return new PooledRedisClientManager(readWriteHosts, readOnlyHosts, - new RedisClientManagerConfig { - MaxWritePoolSize = readWriteHosts.Length, - MaxReadPoolSize = readOnlyHosts.Length, - AutoStart = true, - }); - } + public IRedisClientsManager CreateAndStartManager( + string[] readWriteHosts, string[] readOnlyHosts) + { + return new PooledRedisClientManager(readWriteHosts, readOnlyHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = readWriteHosts.Length, + MaxReadPoolSize = readOnlyHosts.Length, + AutoStart = true, + }); + } - [SetUp] - public void BeforeEachTest() - { - hostCountMap = new Dictionary(); - } + [SetUp] + public void BeforeEachTest() + { + hostCountMap = new Dictionary(); + } - [TearDown] - public void AfterEachTest() - { - CheckHostCountMap(hostCountMap); - } + [TearDown] + public void AfterEachTest() + { + CheckHostCountMap(hostCountMap); + } - [Test] - public void Pool_can_support_64_threads_using_the_client_simultaneously() - { - RunSimultaneously(CreateAndStartManager, UseClient); - } + [Test] + public void Pool_can_support_64_threads_using_the_client_simultaneously() + { + RunSimultaneously(CreateAndStartManager, UseClient); + } - [Test] - public void Basic_can_support_64_threads_using_the_client_simultaneously() - { - RunSimultaneously(CreateAndStartBasicManager, UseClient); - } + [Test] + public void Basic_can_support_64_threads_using_the_client_simultaneously() + { + RunSimultaneously(CreateAndStartBasicManager, UseClient); + } - private static void UseClient(IRedisClientsManager manager, int clientNo) - { - using (var client = manager.GetReadOnlyClient()) - { - lock (hostCountMap) - { - int hostCount; - if (!hostCountMap.TryGetValue(client.Host, out hostCount)) - { - hostCount = 0; - } + [Test] + public void ManagerPool_can_support_64_threads_using_the_client_simultaneously() + { + RunSimultaneously(CreateAndStartManagerPool, UseClient); + } - hostCountMap[client.Host] = ++hostCount; - } + private static void UseClient(IRedisClientsManager manager, int clientNo) + { + using (var client = manager.GetReadOnlyClient()) + { + lock (hostCountMap) + { + int hostCount; + if (!hostCountMap.TryGetValue(client.Host, out hostCount)) + { + hostCount = 0; + } - Log("Client '{0}' is using '{1}'", clientNo, client.Host); - } - } + hostCountMap[client.Host] = ++hostCount; + } - 
} + Log("Client '{0}' is using '{1}'", clientNo, client.Host); + } + } + + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedRedisClientIntegrationTests.cs b/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedRedisClientIntegrationTests.cs index 02f6e89d..92d0f551 100644 --- a/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedRedisClientIntegrationTests.cs +++ b/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedRedisClientIntegrationTests.cs @@ -2,104 +2,117 @@ using System.Collections.Generic; using System.Diagnostics; using System.Threading; -using Northwind.Common.DataModel; using NUnit.Framework; +using ServiceStack.Common.Tests.Models; using ServiceStack.Text; +#if NETCORE +using System.Threading.Tasks; +#endif namespace ServiceStack.Redis.Tests.Integration { - [TestFixture] - public class MultiThreadedRedisClientIntegrationTests - : IntegrationTestBase - { - private static string testData; - - [TestFixtureSetUp] - public void onBeforeTestFixture() - { - NorthwindData.LoadData(false); - - testData = TypeSerializer.SerializeToString(NorthwindData.Customers); - } - - [Test] - public void Can_support_64_threads_using_the_client_simultaneously() - { - var before = Stopwatch.GetTimestamp(); - - const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 - - var clientAsyncResults = new List(); - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - for (var i = 0; i < noOfConcurrentClients; i++) - { - var clientNo = i; - var action = (Action)(() => UseClientAsync(redisClient, clientNo)); - clientAsyncResults.Add(action.BeginInvoke(null, null)); - } - } - - WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); - - Debug.WriteLine(String.Format("Time Taken: {0}", (Stopwatch.GetTimestamp() - before) / 1000)); - } - - [Test] - public void Can_support_64_threads_using_the_client_sequentially() - { - var before = Stopwatch.GetTimestamp(); - - const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 - - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - for (var i = 0; i < noOfConcurrentClients; i++) - { - var clientNo = i; - UseClient(redisClient, clientNo); - } - } - - Debug.WriteLine(String.Format("Time Taken: {0}", (Stopwatch.GetTimestamp() - before) / 1000)); - } - - private void UseClientAsync(RedisClient client, int clientNo) - { - lock (this) - { - UseClient(client, clientNo); - } - } - - private static void UseClient(RedisClient client, int clientNo) - { - var host = ""; - - try - { - host = client.Host; - - Log("Client '{0}' is using '{1}'", clientNo, client.Host); - - var testClientKey = "test:" + host + ":" + clientNo; - client.SetEntry(testClientKey, testData); - var result = client.GetValue(testClientKey) ?? ""; - - Log("\t{0} => {1} len {2} {3} len", testClientKey, - testData.Length, testData.Length == result.Length ? 
"==" : "!=", result.Length); - - } - catch (NullReferenceException ex) - { - Debug.WriteLine("NullReferenceException StackTrace: \n" + ex.StackTrace); - } - catch (Exception ex) - { - Debug.WriteLine(String.Format("\t[ERROR@{0}]: {1} => {2}", - host, ex.GetType().Name, ex.Message)); - } - } - - } + [TestFixture] + public class MultiThreadedRedisClientIntegrationTests + : IntegrationTestBase + { + private static string testData; + + [OneTimeSetUp] + public void onBeforeTestFixture() + { + var results = 100.Times(x => ModelWithFieldsOfDifferentTypes.Create(x)); + + testData = TypeSerializer.SerializeToString(results); + } + + [Test] + public void Can_support_64_threads_using_the_client_simultaneously() + { + var before = Stopwatch.GetTimestamp(); + + const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + +#if NETCORE + List tasks = new List(); +#else + var clientAsyncResults = new List(); +#endif + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + for (var i = 0; i < noOfConcurrentClients; i++) + { + var clientNo = i; + var action = (Action)(() => UseClientAsync(redisClient, clientNo)); +#if NETCORE + tasks.Add(Task.Run(action)); +#else + clientAsyncResults.Add(action.BeginInvoke(null, null)); +#endif + } + } +#if NETCORE + Task.WaitAll(tasks.ToArray()); +#else + WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); +#endif + Debug.WriteLine(String.Format("Time Taken: {0}", (Stopwatch.GetTimestamp() - before) / 1000)); + } + + [Test] + public void Can_support_64_threads_using_the_client_sequentially() + { + var before = Stopwatch.GetTimestamp(); + + const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + for (var i = 0; i < noOfConcurrentClients; i++) + { + var clientNo = i; + UseClient(redisClient, clientNo); + } + } + + Debug.WriteLine(String.Format("Time Taken: {0}", (Stopwatch.GetTimestamp() - before) / 1000)); + } + + private void UseClientAsync(RedisClient client, int clientNo) + { + lock (this) + { + UseClient(client, clientNo); + } + } + + private static void UseClient(RedisClient client, int clientNo) + { + var host = ""; + + try + { + host = client.Host; + + Log("Client '{0}' is using '{1}'", clientNo, client.Host); + + var testClientKey = "test:" + host + ":" + clientNo; + client.SetValue(testClientKey, testData); + var result = client.GetValue(testClientKey) ?? ""; + + Log("\t{0} => {1} len {2} {3} len", testClientKey, + testData.Length, testData.Length == result.Length ? 
"==" : "!=", result.Length); + + } + catch (NullReferenceException ex) + { + Debug.WriteLine("NullReferenceException StackTrace: \n" + ex.StackTrace); + } + catch (Exception ex) + { + Debug.WriteLine(String.Format("\t[ERROR@{0}]: {1} => {2}", + host, ex.GetType().Name, ex.Message)); + } + } + + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Integration/RedisRegressionTestRun.cs b/tests/ServiceStack.Redis.Tests/Integration/RedisRegressionTestRun.cs index d0e672ba..a5aa4707 100644 --- a/tests/ServiceStack.Redis.Tests/Integration/RedisRegressionTestRun.cs +++ b/tests/ServiceStack.Redis.Tests/Integration/RedisRegressionTestRun.cs @@ -3,139 +3,140 @@ using System.Diagnostics; using System.Linq; using System.Threading; -using Northwind.Common.DataModel; using NUnit.Framework; -using ServiceStack.Common; +using ServiceStack.Common.Tests.Models; using ServiceStack.Text; namespace ServiceStack.Redis.Tests.Integration { - [TestFixture, Category("Integration")] - public class RedisRegressionTestRun - { - private static string testData; - - [TestFixtureSetUp] - public void onBeforeTestFixture() - { - NorthwindData.LoadData(false); - - testData = TypeSerializer.SerializeToString(NorthwindData.Customers); - } - - [Test] - public void Can_support_64_threads_using_the_client_simultaneously() - { - var before = Stopwatch.GetTimestamp(); - - const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 - - var clientAsyncResults = new List(); - using (var manager = new PooledRedisClientManager(TestConfig.MasterHosts, TestConfig.SlaveHosts)) - { - manager.GetClient().Run(x => x.FlushAll()); - - for (var i = 0; i < noOfConcurrentClients; i++) - { - var clientNo = i; - var action = (Action)(() => UseClientAsync(manager, clientNo)); - clientAsyncResults.Add(action.BeginInvoke(null, null)); - } - } - - WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); - - Debug.WriteLine(String.Format("Completed in {0} ticks", (Stopwatch.GetTimestamp() - before))); - } - - [Test] - public void Can_run_series_of_operations_sequentially() - { - var before = Stopwatch.GetTimestamp(); - - const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 - - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - redisClient.FlushAll(); - - for (var i = 0; i < noOfConcurrentClients; i++) - { - var clientNo = i; - UseClient(redisClient, clientNo); - } - } - - Debug.WriteLine(String.Format("Completed in {0} ticks", (Stopwatch.GetTimestamp() - before))); - } - - private static void UseClientAsync(IRedisClientsManager manager, int clientNo) - { - using (var client = manager.GetReadOnlyClient()) - { - UseClient(client, clientNo); - } - } - - private static void UseClient(IRedisClient client, int clientNo) - { - var host = ""; - - try - { - host = client.Host; - - Debug.WriteLine(String.Format("Client '{0}' is using '{1}'", clientNo, client.Host)); - var differentDbs = new[] { 1, 0, 2 }; - - foreach (var db in differentDbs) - { - client.Db = db; - - var testClientKey = "test:" + host + ":" + clientNo; - client.SetEntry(testClientKey, testData); - var result = client.GetValue(testClientKey) ?? 
""; - LogResult(db, testClientKey, result); - - var testClientSetKey = "test+set:" + host + ":" + clientNo; - client.AddItemToSet(testClientSetKey, testData); - var resultSet = client.GetAllItemsFromSet(testClientSetKey); - LogResult(db, testClientKey, resultSet.ToList().FirstOrDefault()); - - var testClientListKey = "test+list:" + host + ":" + clientNo; - client.AddItemToList(testClientListKey, testData); - var resultList = client.GetAllItemsFromList(testClientListKey); - LogResult(db, testClientKey, resultList.FirstOrDefault()); - - } - } - catch (NullReferenceException ex) - { - Debug.WriteLine("NullReferenceException StackTrace: \n" + ex.StackTrace); + [TestFixture, Category("Integration")] + public class RedisRegressionTestRun + { + private static string testData; + + [OneTimeSetUp] + public void onBeforeTestFixture() + { + var results = 100.Times(x => ModelWithFieldsOfDifferentTypes.Create(x)); + + testData = TypeSerializer.SerializeToString(results); + } + + [Ignore("Can hang CI")] + [Test] + public void Can_support_64_threads_using_the_client_simultaneously() + { + var before = Stopwatch.GetTimestamp(); + + const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + + var clientAsyncResults = new List(); + using (var manager = new PooledRedisClientManager(TestConfig.MasterHosts, TestConfig.ReplicaHosts)) + { + manager.GetClient().Run(x => x.FlushAll()); + + for (var i = 0; i < noOfConcurrentClients; i++) + { + var clientNo = i; + var action = (Action)(() => UseClientAsync(manager, clientNo)); + clientAsyncResults.Add(action.BeginInvoke(null, null)); + } + } + + WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); + + Debug.WriteLine(string.Format("Completed in {0} ticks", (Stopwatch.GetTimestamp() - before))); + + RedisStats.ToDictionary().PrintDump(); + } + + [Test] + public void Can_run_series_of_operations_sequentially() + { + var before = Stopwatch.GetTimestamp(); + + const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + redisClient.FlushAll(); + + for (var i = 0; i < noOfConcurrentClients; i++) + { + var clientNo = i; + UseClient(redisClient, clientNo); + } + } + + Debug.WriteLine(String.Format("Completed in {0} ticks", (Stopwatch.GetTimestamp() - before))); + } + + private static void UseClientAsync(IRedisClientsManager manager, int clientNo) + { + using (var client = manager.GetClient()) + { + UseClient(client, clientNo); + } + } + + private static void UseClient(IRedisClient client, int clientNo) + { + var host = ""; + + try + { + host = client.Host; + + Debug.WriteLine(string.Format("Client '{0}' is using '{1}'", clientNo, client.Host)); + var differentDbs = new[] { 1, 0, 2 }; + + foreach (var db in differentDbs) + { + client.Db = db; + + var testClientKey = "test:" + host + ":" + clientNo; + client.SetValue(testClientKey, testData); + var result = client.GetValue(testClientKey) ?? 
""; + LogResult(db, testClientKey, result); + + var testClientSetKey = "test+set:" + host + ":" + clientNo; + client.AddItemToSet(testClientSetKey, testData); + var resultSet = client.GetAllItemsFromSet(testClientSetKey); + LogResult(db, testClientKey, resultSet.ToList().FirstOrDefault()); + + var testClientListKey = "test+list:" + host + ":" + clientNo; + client.AddItemToList(testClientListKey, testData); + var resultList = client.GetAllItemsFromList(testClientListKey); + LogResult(db, testClientKey, resultList.FirstOrDefault()); + } + } + catch (NullReferenceException ex) + { + Debug.WriteLine("NullReferenceException StackTrace: \n" + ex.StackTrace); Assert.Fail("NullReferenceException"); } - catch (Exception ex) - { - Debug.WriteLine(String.Format("\t[ERROR@{0}]: {1} => {2}", - host, ex.GetType().Name, ex)); + catch (Exception ex) + { + Debug.WriteLine(string.Format("\t[ERROR@{0}]: {1} => {2}", + host, ex.GetType().Name, ex)); Assert.Fail("Exception"); - } - } - - private static void LogResult(int db, string testClientKey, string resultData) - { - if (resultData.IsNullOrEmpty()) - { - Debug.WriteLine(String.Format("\tERROR@[{0}] NULL", db)); - return; - } - - Debug.WriteLine(String.Format("\t[{0}] {1} => {2} len {3} {4} len", - db, - testClientKey, - testData.Length, - testData.Length == resultData.Length ? "==" : "!=", resultData.Length)); - } - } + } + } + + private static void LogResult(int db, string testClientKey, string resultData) + { + if (resultData.IsNullOrEmpty()) + { + Debug.WriteLine(String.Format("\tERROR@[{0}] NULL", db)); + return; + } + + Debug.WriteLine(String.Format("\t[{0}] {1} => {2} len {3} {4} len", + db, + testClientKey, + testData.Length, + testData.Length == resultData.Length ? "==" : "!=", resultData.Length)); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/AuthIssue.cs b/tests/ServiceStack.Redis.Tests/Issues/AuthIssue.cs new file mode 100644 index 00000000..8418de39 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Issues/AuthIssue.cs @@ -0,0 +1,42 @@ +using System.Linq; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests.Issues +{ + public class AuthIssue + { + [Test] + [Ignore("Requires password on master")] + public void Does_retry_failed_commands_auth() + { + // -> Redis must have "requirepass testpassword" in config + var connstr = "testpassword@localhost"; + RedisStats.Reset(); + + var redisCtrl = new RedisClient(connstr); //RedisConfig.DefaultHost + redisCtrl.FlushAll(); + redisCtrl.SetClient("redisCtrl"); + + var redis = new RedisClient(connstr); + redis.SetClient("redisRetry"); + + var clientInfo = redisCtrl.GetClientsInfo(); + var redisId = clientInfo.First(m => m["name"] == "redisRetry")["id"]; + Assert.That(redisId.Length, Is.GreaterThan(0)); + + Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(1)); + + redis.OnBeforeFlush = () => + { + redisCtrl.KillClients(withId: redisId); + }; + + Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(2)); + Assert.That(redis.Get("retryCounter"), Is.EqualTo(2)); + + Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(0)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/ConnectionStringConfigIssues.cs b/tests/ServiceStack.Redis.Tests/Issues/ConnectionStringConfigIssues.cs new file mode 100644 index 00000000..f2f68d1b --- /dev/null +++ 
b/tests/ServiceStack.Redis.Tests/Issues/ConnectionStringConfigIssues.cs @@ -0,0 +1,27 @@ +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests.Issues +{ + public class ConnectionStringConfigIssues + { + [Test] + public void Can_use_password_with_equals() + { + var connString = "127.0.0.1?password=" + "p@55w0rd=".UrlEncode(); + + var config = connString.ToRedisEndpoint(); + Assert.That(config.Password, Is.EqualTo("p@55w0rd=")); + } + + [Test, Ignore("Requires redis-server configured with 'requirepass p@55w0rd='")] + public void Can_connect_to_redis_with_password_with_equals() + { + var connString = "127.0.0.1?password=" + "p@55w0rd=".UrlEncode(); + var redisManager = new PooledRedisClientManager(connString); + using (var redis = redisManager.GetClient()) + { + Assert.That(redis.Password, Is.EqualTo("p@55w0rd=")); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/DomainEventsTests.cs b/tests/ServiceStack.Redis.Tests/Issues/DomainEventsTests.cs index 3bbc9be6..f76dd556 100644 --- a/tests/ServiceStack.Redis.Tests/Issues/DomainEventsTests.cs +++ b/tests/ServiceStack.Redis.Tests/Issues/DomainEventsTests.cs @@ -33,7 +33,7 @@ public class DomainEventsTests public void Can_Retrieve_DomainEvents() { var userId = Guid.NewGuid(); - var client = new RedisClient("localhost"); + var client = new RedisClient(TestConfig.SingleHost); client.FlushAll(); client.As().Lists["urn:domainevents-" + userId].Add(new UserPromotedEvent { UserId = userId }); @@ -51,7 +51,7 @@ public void Can_Retrieve_DomainEvents() [Test] public void Can_from_Retrieve_DomainEvents_list() { - var client = new RedisClient("localhost"); + var client = new RedisClient(TestConfig.SingleHost); var users = client.As(); var userId = Guid.NewGuid(); @@ -66,7 +66,7 @@ public void Can_from_Retrieve_DomainEvents_list() users.Store(eventsForUser); - var all = users.GetAll(); + var all = users.GetAll(); } [Test] diff --git a/tests/ServiceStack.Redis.Tests/Issues/PipelineIssueTests.cs b/tests/ServiceStack.Redis.Tests/Issues/PipelineIssueTests.cs new file mode 100644 index 00000000..f675d178 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Issues/PipelineIssueTests.cs @@ -0,0 +1,81 @@ +using System; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests.Issues +{ + [TestFixture] + public class PipelineIssueTests + : RedisClientTestsBase + { + [Test] + public void Disposing_Client_Clears_Pipeline() + { + var clientMgr = new PooledRedisClientManager(TestConfig.SingleHost); + + using (var client = clientMgr.GetClient()) + { + client.Set("k1", "v1"); + client.Set("k2", "v2"); + client.Set("k3", "v3"); + + using (var pipe = client.CreatePipeline()) + { + pipe.QueueCommand(c => c.Get("k1"), p => { throw new Exception(); }); + pipe.QueueCommand(c => c.Get("k2")); + + try + { + pipe.Flush(); + } + catch (Exception) + { + //The exception is expected. Swallow it. 
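// Even though the pipeline failed mid-flush, disposing the pipeline and client should discard
// any partially queued commands before the connection is returned to the pool, so the next
// client taken from clientMgr below is expected to read "k3" cleanly rather than see stale replies.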
+ } + } + } + + using (var client = clientMgr.GetClient()) + { + Assert.AreEqual("v3", client.Get("k3")); + } + } + + [Test] + public void Can_Set_with_DateTime_in_Pipeline() + { + using (var clientsManager = new RedisManagerPool(TestConfig.SingleHost)) + { + bool result; + int value = 111; + string key = $"key:{value}"; + + // Set key with pipeline (batching many requests) + using (var redis = clientsManager.GetClient()) + { + using (var pipeline = redis.CreatePipeline()) + { + //Only atomic operations can be called within a Transaction or Pipeline + Assert.Throws(() => + pipeline.QueueCommand(r => r.Set(key, value, DateTime.Now.AddMinutes(1)), r => result = r)); + } + + using (var pipeline = redis.CreatePipeline()) + { + pipeline.QueueCommand(r => r.Set(key, value), r => result = r); + pipeline.QueueCommand(r => r.ExpireEntryAt(key, DateTime.Now.AddMinutes(1))); + + pipeline.Flush(); + } + } + + // Get key + using (var redis = clientsManager.GetClient()) + { + var res = redis.Get(key); + Assert.That(res, Is.EqualTo(value)); + } + } + + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/PooledRedisClientManagerIssues.cs b/tests/ServiceStack.Redis.Tests/Issues/PooledRedisClientManagerIssues.cs index 40c89cd4..c2147a3c 100644 --- a/tests/ServiceStack.Redis.Tests/Issues/PooledRedisClientManagerIssues.cs +++ b/tests/ServiceStack.Redis.Tests/Issues/PooledRedisClientManagerIssues.cs @@ -5,68 +5,68 @@ namespace ServiceStack.Redis.Tests.Issues { - [Ignore("Can't be included in Unit tests since it shutsdown redis server")] - [TestFixture] - public class PooledRedisClientManagerIssues - : RedisClientTestsBase - { - private static PooledRedisClientManager pool; - - public static void Stuff() - { - while (true) - { - RedisClient redisClient = null; - try - { - using (redisClient = (RedisClient)pool.GetClient()) - { - redisClient.Set("test", DateTime.Now); - } - } - catch(NotSupportedException nse) - { - Debug.WriteLine(redisClient.ToString()); - Assert.Fail(nse.Message); - } - catch (Exception e) - { - Debug.WriteLine(e.Message); - } - Thread.Sleep(10); - } - } + [Ignore("Can't be included in Unit tests since it shutsdown redis server")] + [TestFixture] + public class PooledRedisClientManagerIssues + : RedisClientTestsBase + { + private static PooledRedisClientManager pool; - [Test] - public void Issue37_Cannot_add_unknown_client_back_to_pool_exception() - { - pool = new PooledRedisClientManager(); - try - { - var threads = new Thread[100]; - for (var i = 0; i < threads.Length; i++) - { - threads[i] = new Thread(Stuff); - threads[i].Start(); - } - Debug.WriteLine("running, waiting 10secs.."); - Thread.Sleep(10000); - using (var redisClient = (RedisClient)pool.GetClient()) - { - Debug.WriteLine("shutdown Redis!"); - redisClient.Shutdown(); - } - } - catch (NotSupportedException nse) - { - Assert.Fail(nse.Message); - } - catch (Exception e) - { - Debug.WriteLine(e.Message); - } - - Thread.Sleep(5000); - } - } + public static void Stuff() + { + while (true) + { + RedisClient redisClient = null; + try + { + using (redisClient = (RedisClient)pool.GetClient()) + { + redisClient.Set("test", DateTime.Now); + } + } + catch (NotSupportedException nse) + { + Debug.WriteLine(redisClient.ToString()); + Assert.Fail(nse.Message); + } + catch (Exception e) + { + Debug.WriteLine(e.Message); + } + Thread.Sleep(10); + } + } + + [Test] + public void Issue37_Cannot_add_unknown_client_back_to_pool_exception() + { + pool = new PooledRedisClientManager(); + try + { + var threads = new 
Thread[100]; + for (var i = 0; i < threads.Length; i++) + { + threads[i] = new Thread(Stuff); + threads[i].Start(); + } + Debug.WriteLine("running, waiting 10secs.."); + Thread.Sleep(10000); + using (var redisClient = (RedisClient)pool.GetClient()) + { + Debug.WriteLine("shutdown Redis!"); + redisClient.Shutdown(); + } + } + catch (NotSupportedException nse) + { + Assert.Fail(nse.Message); + } + catch (Exception e) + { + Debug.WriteLine(e.Message); + } + + Thread.Sleep(5000); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/RedisCharacterizationTests.cs b/tests/ServiceStack.Redis.Tests/Issues/RedisCharacterizationTests.cs new file mode 100644 index 00000000..a7fdac4e --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Issues/RedisCharacterizationTests.cs @@ -0,0 +1,75 @@ +using System; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests.Issues +{ + public class RedisCharacterizationTests + { + private IRedisClientsManager _db1ClientManager; + private IRedisClientsManager _db2ClientManager; + + [SetUp] + public void SetUp() + { + foreach (var clientManager in new[] { _db1ClientManager, _db2ClientManager }) + { + if (clientManager != null) + { + using (var cacheClient = clientManager.GetCacheClient()) + { + cacheClient.Remove("key"); + } + } + } + } + + [Test] + public void BasicRedisClientManager_WhenUsingADatabaseOnARedisConnectionString_CorrectDatabaseIsUsed() + { + TestForDatabaseOnConnectionString(connectionString => new BasicRedisClientManager(connectionString)); + } + + [Test] + public void PooledRedisClientManager_WhenUsingADatabaseOnARedisConnectionString_CorrectDatabaseIsUsed() + { + TestForDatabaseOnConnectionString(connectionString => new PooledRedisClientManager(connectionString)); + } + + [Test] + public void RedisManagerPool_WhenUsingADatabaseOnARedisConnectionString_CorrectDatabaseIsUsed() + { + TestForDatabaseOnConnectionString(connectionString => new RedisManagerPool(connectionString)); + } + + private void TestForDatabaseOnConnectionString(Func factory) + { + _db1ClientManager = factory(TestConfig.SingleHost + "?db=1"); + _db2ClientManager = factory(TestConfig.SingleHost + "?db=2"); + + using (var cacheClient = _db1ClientManager.GetCacheClient()) + { + cacheClient.Set("key", "value"); + } + using (var cacheClient = _db2ClientManager.GetCacheClient()) + { + Assert.Null(cacheClient.Get("key")); + } + } + + [Test] + public void WhenUsingAnInitialDatabase_CorrectDatabaseIsUsed() + { + _db1ClientManager = new BasicRedisClientManager(1, TestConfig.SingleHost); + _db2ClientManager = new BasicRedisClientManager(2, TestConfig.SingleHost); + + using (var cacheClient = _db1ClientManager.GetCacheClient()) + { + cacheClient.Set("key", "value"); + } + using (var cacheClient = _db2ClientManager.GetCacheClient()) + { + Assert.Null(cacheClient.Get("key")); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/ReportedIssues.cs b/tests/ServiceStack.Redis.Tests/Issues/ReportedIssues.cs index 58a62b52..2b44d1d8 100644 --- a/tests/ServiceStack.Redis.Tests/Issues/ReportedIssues.cs +++ b/tests/ServiceStack.Redis.Tests/Issues/ReportedIssues.cs @@ -1,49 +1,47 @@ using System.Collections.Generic; +using System.Linq; using NUnit.Framework; +using ServiceStack.Redis.Generic; +using ServiceStack.Text; namespace ServiceStack.Redis.Tests.Issues { - [TestFixture] - public class ReportedIssues - : RedisClientTestsBase - { - private readonly List storeMembers = new List { "one", "two", "three", "four" }; - - [Test] - 
public void Add_range_to_set_fails_if_first_command() - { - var redis = new RedisClient(TestConfig.SingleHost); - - redis.AddRangeToSet("testset", storeMembers); - - var members = Redis.GetAllItemsFromSet("testset"); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Transaction_fails_if_first_command() - { - var redis = new RedisClient(TestConfig.SingleHost); - using (var trans = redis.CreateTransaction()) - { - trans.QueueCommand(r => r.IncrementValue("A")); - - trans.Commit(); - } - Assert.That(redis.GetValue("A"), Is.EqualTo("1")); - } + [TestFixture] + public class ReportedIssues + : RedisClientTestsBase + { + private readonly List storeMembers = new List { "one", "two", "three", "four" }; + + [Test] + public void Add_range_to_set_fails_if_first_command() + { + Redis.AddRangeToSet("testset", storeMembers); + + var members = Redis.GetAllItemsFromSet("testset"); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Transaction_fails_if_first_command() + { + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.IncrementValue("A")); + + trans.Commit(); + } + Assert.That(Redis.GetValue("A"), Is.EqualTo("1")); + } [Test] public void Success_callback_fails_for_pipeline_using_GetItemScoreInSortedSet() { - var redis = new RedisClient(TestConfig.SingleHost); double score = 0; + Redis.AddItemToSortedSet("testzset", "value", 1); - redis.AddItemToSortedSet("testset", "value", 1); - - using (var pipeline = redis.CreatePipeline()) + using (var pipeline = Redis.CreatePipeline()) { - pipeline.QueueCommand(u => u.GetItemScoreInSortedSet("testset", "value"), x => + pipeline.QueueCommand(u => u.GetItemScoreInSortedSet("testzset", "value"), x => { //score should be assigned to 1 here score = x; @@ -54,5 +52,60 @@ public void Success_callback_fails_for_pipeline_using_GetItemScoreInSortedSet() Assert.That(score, Is.EqualTo(1)); } + + public class Test + { + public int Id { get; set; } + public string Name { get; set; } + + protected bool Equals(Test other) => Id == other.Id && Name == other.Name; + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != this.GetType()) return false; + return Equals((Test) obj); + } + + public override int GetHashCode() + { + unchecked + { + return (Id * 397) ^ (Name != null ? 
Name.GetHashCode() : 0); + } + } + } + + [Test] + public void Try_simulate_NRE_when_calling_GetAllEntriesFromHash_using_BasicRedisClientManager() + { + using (var redisManager = new BasicRedisClientManager(TestConfig.SingleHost)) + using (var redis = redisManager.GetClient()) + { + IRedisHash testHash = redis.As() + .GetHash("test-hash"); + + Assert.That(testHash.Count, Is.EqualTo(0)); + + var contents = testHash.GetAll(); + Assert.That(contents.Count, Is.EqualTo(0)); + + var test1 = new Test { Id = 1, Name = "Name1" }; + var test2 = new Test { Id = 2, Name = "Name2" }; + testHash["A"] = test1; + testHash["B"] = test2; + + contents = testHash.GetAll(); + + Assert.That(contents, Is.EqualTo(new Dictionary { + ["A"] = test1, + ["B"] = test2, + })); + + Assert.That(testHash["A"], Is.EqualTo(test1)); + Assert.That(testHash["B"], Is.EqualTo(test2)); + } + } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/TransactionIssueTests.cs b/tests/ServiceStack.Redis.Tests/Issues/TransactionIssueTests.cs index f5d1e509..fda38588 100644 --- a/tests/ServiceStack.Redis.Tests/Issues/TransactionIssueTests.cs +++ b/tests/ServiceStack.Redis.Tests/Issues/TransactionIssueTests.cs @@ -1,59 +1,123 @@ using System; using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Text; namespace ServiceStack.Redis.Tests.Issues { - [TestFixture] - public class TransactionIssueTests - : RedisClientTestsBase - { - [Test] - public void Can_Get_and_Remove_multiple_keys_in_same_transaction() - { - 5.Times(x => Redis.Set("foo" + x, x)); - - var keys = Redis.SearchKeys("foo*"); - Assert.That(keys, Has.Count.EqualTo(5)); - - var dict = new Dictionary(); - using (var transaction = Redis.CreateTransaction()) - { - foreach (var key in keys) - { - var y = key; - transaction.QueueCommand(x => x.Get(y), val => dict.Add(y, val)); - } - transaction.QueueCommand(x => x.RemoveAll(keys)); - transaction.Commit(); - } - - Assert.That(dict, Has.Count.EqualTo(5)); - keys = Redis.SearchKeys("foo*"); - Assert.That(keys, Has.Count.EqualTo(0)); - } - - [Test] - public void Can_GetValues_and_Remove_multiple_keys_in_same_transaction() - { - 5.Times(x => Redis.Set("foo" + x, x)); - - var keys = Redis.SearchKeys("foo*"); - Assert.That(keys, Has.Count.EqualTo(5)); - - var values = new List(); - using (var transaction = Redis.CreateTransaction()) - { - transaction.QueueCommand(x => x.GetValues(keys), val => values = val); - transaction.QueueCommand(x => x.RemoveAll(keys)); - transaction.Commit(); - } - - Assert.That(values, Has.Count.EqualTo(5)); - keys = Redis.SearchKeys("foo*"); - Assert.That(keys, Has.Count.EqualTo(0)); - } - - } + [TestFixture] + public class TransactionIssueTests + : RedisClientTestsBase + { + [Test] + public void Can_Get_and_Remove_multiple_keys_in_same_transaction() + { + 5.Times(x => Redis.Set("foo" + x, x)); + + var keys = Redis.SearchKeys("foo*"); + Assert.That(keys, Has.Count.EqualTo(5)); + + var dict = new Dictionary(); + using (var transaction = Redis.CreateTransaction()) + { + foreach (var key in keys) + { + var y = key; + transaction.QueueCommand(x => x.Get(y), val => dict.Add(y, val)); + } + transaction.QueueCommand(x => x.RemoveAll(keys)); + transaction.Commit(); + } + + Assert.That(dict, Has.Count.EqualTo(5)); + keys = Redis.SearchKeys("foo*"); + Assert.That(keys, Has.Count.EqualTo(0)); + } + + [Test] + public void Can_GetValues_and_Remove_multiple_keys_in_same_transaction() + { 
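// Same idea as the test above, but the reads are batched into one GetValues call queued in the
// transaction (presumably a single MGET-style round trip), so reading all "foo*" keys and removing
// them should be applied atomically by EXEC, with the callback receiving the full list of values.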
+ 5.Times(x => Redis.Set("foo" + x, x)); + + var keys = Redis.SearchKeys("foo*"); + Assert.That(keys, Has.Count.EqualTo(5)); + + var values = new List(); + using (var transaction = Redis.CreateTransaction()) + { + transaction.QueueCommand(x => x.GetValues(keys), val => values = val); + transaction.QueueCommand(x => x.RemoveAll(keys)); + transaction.Commit(); + } + + Assert.That(values, Has.Count.EqualTo(5)); + keys = Redis.SearchKeys("foo*"); + Assert.That(keys, Has.Count.EqualTo(0)); + } + + + private void CheckThisConnection() + { + "CheckingThisConnection()...".Print(); + + using (var redis = GetRedisClient()) + using (var trans = redis.CreateTransaction()) + { + trans.QueueCommand( + r => r.SetEntryInHash("Test", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test2", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test3", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test4", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test5", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test6", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test7", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test8", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test9", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test10", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test11", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test12", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test13", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test14", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test15", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test16", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test17", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test18", "Price", "123")); + trans.QueueCommand( + r => r.SetEntryInHash("Test19", "Price", "123")); + trans.Commit(); + } + } + + private void CheckConnection(object state) + { + Task.Factory.StartNew(CheckThisConnection); + } + + [Ignore("Integration"), Test] + public void Can_queue_large_transaction() + { + var q = new System.Threading.Timer(CheckConnection, null, 30000, 2); + + Thread.Sleep(30000); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/LexTests.Async.cs b/tests/ServiceStack.Redis.Tests/LexTests.Async.cs new file mode 100644 index 00000000..d00e3f0f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/LexTests.Async.cs @@ -0,0 +1,115 @@ +using NUnit.Framework; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class LexTestsAsync + : RedisClientTestsBaseAsync + { + readonly string[] values = "a,b,c,d,e,f,g".Split(','); + + [SetUp] + public async Task SetUp() + { + await RedisAsync.FlushAllAsync(); + foreach(var x in values) + { + await NativeAsync.ZAddAsync("zset", 0, x.ToUtf8Bytes()); + } + } + + [Test] + public async Task Can_ZRangeByLex_all_entries() + { + var results = await NativeAsync.ZRangeByLexAsync("zset", "-", "+"); + + Assert.That(results.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(values)); + + results = await NativeAsync.ZRangeByLexAsync("zset", "-", "+", 1, 3); + Assert.That(results.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(new[] { "b", "c", "d" })); + } + + [Test] + public async Task Can_ZRangeByLex_Desc() + { + var descInclusive = await 
NativeAsync.ZRangeByLexAsync("zset", "-", "[c"); + Assert.That(descInclusive.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(new[] { "a", "b", "c" })); + + var descExclusive = await NativeAsync.ZRangeByLexAsync("zset", "-", "(c"); + Assert.That(descExclusive.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(new[] { "a", "b" })); + } + + [Test] + public async Task Can_ZRangeByLex_Min_and_Max() + { + var range = await NativeAsync.ZRangeByLexAsync("zset", "[aaa", "(g"); + Assert.That(range.Map(x => x.FromUtf8Bytes()), + Is.EquivalentTo(new[] { "b", "c", "d", "e", "f" })); + } + + [Test] + public async Task Can_ZlexCount() + { + var total = await NativeAsync.ZLexCountAsync("zset", "-", "+"); + Assert.That(total, Is.EqualTo(values.Length)); + + Assert.That(await NativeAsync.ZLexCountAsync("zset", "-", "[c"), Is.EqualTo(3)); + Assert.That(await NativeAsync.ZLexCountAsync("zset", "-", "(c"), Is.EqualTo(2)); + } + + [Test] + public async Task Can_ZRemRangeByLex() + { + var removed = await NativeAsync.ZRemRangeByLexAsync("zset", "[aaa", "(g"); + Assert.That(removed, Is.EqualTo(5)); + + var remainder = await NativeAsync.ZRangeByLexAsync("zset", "-", "+"); + Assert.That(remainder.Map(x => x.FromUtf8Bytes()), Is.EqualTo(new[] { "a", "g" })); + } + + [Test] + public async Task Can_SearchSortedSet() + { + Assert.That(await RedisAsync.SearchSortedSetAsync("zset"), Is.EquivalentTo(values)); + Assert.That(await RedisAsync.SearchSortedSetAsync("zset", start: "-"), Is.EquivalentTo(values)); + Assert.That(await RedisAsync.SearchSortedSetAsync("zset", end: "+"), Is.EquivalentTo(values)); + + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", start: "[aaa")).Count, Is.EqualTo(values.Length - 1)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", end: "(g")).Count, Is.EqualTo(values.Length - 1)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "[aaa", "(g")).Count, Is.EqualTo(values.Length - 2)); + + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "a", "c")).Count, Is.EqualTo(3)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "[a", "[c")).Count, Is.EqualTo(3)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "a", "(c")).Count, Is.EqualTo(2)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "(a", "(c")).Count, Is.EqualTo(1)); + } + + [Test] + public async Task Can_SearchSortedSetCount() + { + Assert.That(await RedisAsync.SearchSortedSetAsync("zset"), Is.EquivalentTo(values)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", start: "-"), Is.EqualTo(values.Length)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", end: "+"), Is.EqualTo(values.Length)); + + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", start: "[aaa"), Is.EqualTo(values.Length - 1)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", end: "(g"), Is.EqualTo(values.Length - 1)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "[aaa", "(g"), Is.EqualTo(values.Length - 2)); + + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "a", "c"), Is.EqualTo(3)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "[a", "[c"), Is.EqualTo(3)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "a", "(c"), Is.EqualTo(2)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "(a", "(c"), Is.EqualTo(1)); + } + + [Test] + public async Task Can_RemoveRangeFromSortedSetBySearch() + { + var removed = await RedisAsync.RemoveRangeFromSortedSetBySearchAsync("zset", 
"[aaa", "(g"); + Assert.That(removed, Is.EqualTo(5)); + + var remainder = await RedisAsync.SearchSortedSetAsync("zset"); + Assert.That(remainder, Is.EqualTo(new[] { "a", "g" })); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/LexTests.cs b/tests/ServiceStack.Redis.Tests/LexTests.cs new file mode 100644 index 00000000..215bd77a --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/LexTests.cs @@ -0,0 +1,111 @@ +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class LexTests + : RedisClientTestsBase + { + readonly string[] values = "a,b,c,d,e,f,g".Split(','); + + [SetUp] + public void SetUp() + { + Redis.FlushAll(); + values.Each(x => Redis.ZAdd("zset", 0, x.ToUtf8Bytes())); + } + + [Test] + public void Can_ZRangeByLex_all_entries() + { + var results = Redis.ZRangeByLex("zset", "-", "+"); + + Assert.That(results.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(values)); + + results = Redis.ZRangeByLex("zset", "-", "+", 1, 3); + Assert.That(results.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(new[] { "b", "c", "d" })); + } + + [Test] + public void Can_ZRangeByLex_Desc() + { + var descInclusive = Redis.ZRangeByLex("zset", "-", "[c"); + Assert.That(descInclusive.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(new[] { "a", "b", "c" })); + + var descExclusive = Redis.ZRangeByLex("zset", "-", "(c"); + Assert.That(descExclusive.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(new[] { "a", "b" })); + } + + [Test] + public void Can_ZRangeByLex_Min_and_Max() + { + var range = Redis.ZRangeByLex("zset", "[aaa", "(g"); + Assert.That(range.Map(x => x.FromUtf8Bytes()), + Is.EquivalentTo(new[] { "b", "c", "d", "e", "f" })); + } + + [Test] + public void Can_ZlexCount() + { + var total = Redis.ZLexCount("zset", "-", "+"); + Assert.That(total, Is.EqualTo(values.Length)); + + Assert.That(Redis.ZLexCount("zset", "-", "[c"), Is.EqualTo(3)); + Assert.That(Redis.ZLexCount("zset", "-", "(c"), Is.EqualTo(2)); + } + + [Test] + public void Can_ZRemRangeByLex() + { + var removed = Redis.ZRemRangeByLex("zset", "[aaa", "(g"); + Assert.That(removed, Is.EqualTo(5)); + + var remainder = Redis.ZRangeByLex("zset", "-", "+"); + Assert.That(remainder.Map(x => x.FromUtf8Bytes()), Is.EqualTo(new[] { "a", "g" })); + } + + [Test] + public void Can_SearchSortedSet() + { + Assert.That(Redis.SearchSortedSet("zset"), Is.EquivalentTo(values)); + Assert.That(Redis.SearchSortedSet("zset", start: "-"), Is.EquivalentTo(values)); + Assert.That(Redis.SearchSortedSet("zset", end: "+"), Is.EquivalentTo(values)); + + Assert.That(Redis.SearchSortedSet("zset", start: "[aaa").Count, Is.EqualTo(values.Length - 1)); + Assert.That(Redis.SearchSortedSet("zset", end: "(g").Count, Is.EqualTo(values.Length - 1)); + Assert.That(Redis.SearchSortedSet("zset", "[aaa", "(g").Count, Is.EqualTo(values.Length - 2)); + + Assert.That(Redis.SearchSortedSet("zset", "a", "c").Count, Is.EqualTo(3)); + Assert.That(Redis.SearchSortedSet("zset", "[a", "[c").Count, Is.EqualTo(3)); + Assert.That(Redis.SearchSortedSet("zset", "a", "(c").Count, Is.EqualTo(2)); + Assert.That(Redis.SearchSortedSet("zset", "(a", "(c").Count, Is.EqualTo(1)); + } + + [Test] + public void Can_SearchSortedSetCount() + { + Assert.That(Redis.SearchSortedSet("zset"), Is.EquivalentTo(values)); + Assert.That(Redis.SearchSortedSetCount("zset", start: "-"), Is.EqualTo(values.Length)); + Assert.That(Redis.SearchSortedSetCount("zset", end: "+"), Is.EqualTo(values.Length)); + + Assert.That(Redis.SearchSortedSetCount("zset", start: "[aaa"), 
Is.EqualTo(values.Length - 1)); + Assert.That(Redis.SearchSortedSetCount("zset", end: "(g"), Is.EqualTo(values.Length - 1)); + Assert.That(Redis.SearchSortedSetCount("zset", "[aaa", "(g"), Is.EqualTo(values.Length - 2)); + + Assert.That(Redis.SearchSortedSetCount("zset", "a", "c"), Is.EqualTo(3)); + Assert.That(Redis.SearchSortedSetCount("zset", "[a", "[c"), Is.EqualTo(3)); + Assert.That(Redis.SearchSortedSetCount("zset", "a", "(c"), Is.EqualTo(2)); + Assert.That(Redis.SearchSortedSetCount("zset", "(a", "(c"), Is.EqualTo(1)); + } + + [Test] + public void Can_RemoveRangeFromSortedSetBySearch() + { + var removed = Redis.RemoveRangeFromSortedSetBySearch("zset", "[aaa", "(g"); + Assert.That(removed, Is.EqualTo(5)); + + var remainder = Redis.SearchSortedSet("zset"); + Assert.That(remainder, Is.EqualTo(new[] { "a", "g" })); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/LicenseUsageTests.cs b/tests/ServiceStack.Redis.Tests/LicenseUsageTests.cs new file mode 100644 index 00000000..7a1f4ad2 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/LicenseUsageTests.cs @@ -0,0 +1,158 @@ +// Copyright (c) Service Stack LLC. All Rights Reserved. +// License: https://raw.github.com/ServiceStack/ServiceStack/master/license.txt + +#if !NETCORE +using System.Data; +#endif +using NUnit.Framework; +using ServiceStack.Configuration; +using ServiceStack.Text; +using System; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class FreeLicenseUsageTests : LicenseUsageTests + { + [SetUp] + public void SetUp() + { + LicenseUtils.RemoveLicense(); + JsConfig.Reset(); + } + + [TearDown] + public void TearDown() + { + Licensing.RegisterLicense(Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE")); + } + + [Test] + public void Allows_access_of_21_types() + { + Access20Types(); + Access20Types(); + } + + [Test] + public void Throws_on_access_of_21_types() + { + using (var client = new RedisClient(TestConfig.SingleHost)) + { + Access20Types(); + Access20Types(); + + Assert.Throws(() => + client.As()); + } + } + + [Test, Ignore("Takes too long - but works!")] + public void Allows_access_of_6000_operations() + { + using (var client = new RedisClient(TestConfig.SingleHost)) + { + 6000.Times(() => client.Get("any key")); + } + } + + [Test, Ignore("Takes too long - but works!")] + public void Throws_on_access_of_6100_operations() + { + using (var client = new RedisClient(TestConfig.SingleHost)) + { + Assert.Throws(() => + 6100.Times(() => client.Get("any key"))); + } + } + } + + [TestFixture] + public class RegisteredLicenseUsageTests : LicenseUsageTests + { + [Test] + public void Allows_access_of_21_types() + { +#if NETCORE + Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE"); +#else + Licensing.RegisterLicense(new AppSettings().GetString("servicestack:license")); +#endif + + using (var client = new RedisClient(TestConfig.SingleHost)) + { + Access20Types(); + Access20Types(); + + client.As(); + } + } + + [Test, Ignore("Takes too long - but works!")] + public void Allows_access_of_6100_operations() + { +#if NETCORE + Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE"); +#else + Licensing.RegisterLicense(new AppSettings().GetString("servicestack:license")); +#endif + + using (var client = new RedisClient(TestConfig.SingleHost)) + { + 6100.Times(() => client.Get("any key")); + } + } + } + + class T01 { public int Id { get; set; } } + class T02 { public int Id { get; set; } } + class T03 { public int Id { get; set; } } + class T04 { public int Id { 
get; set; } }
+    class T05 { public int Id { get; set; } }
+    class T06 { public int Id { get; set; } }
+    class T07 { public int Id { get; set; } }
+    class T08 { public int Id { get; set; } }
+    class T09 { public int Id { get; set; } }
+    class T10 { public int Id { get; set; } }
+    class T11 { public int Id { get; set; } }
+    class T12 { public int Id { get; set; } }
+    class T13 { public int Id { get; set; } }
+    class T14 { public int Id { get; set; } }
+    class T15 { public int Id { get; set; } }
+    class T16 { public int Id { get; set; } }
+    class T17 { public int Id { get; set; } }
+    class T18 { public int Id { get; set; } }
+    class T19 { public int Id { get; set; } }
+    class T20 { public int Id { get; set; } }
+    class T21 { public int Id { get; set; } }
+
+    public class LicenseUsageTests
+    {
+        protected void Access20Types()
+        {
+            using (var client = new RedisClient(TestConfig.SingleHost))
+            {
+                client.As<T01>();
+                client.As<T02>();
+                client.As<T03>();
+                client.As<T04>();
+                client.As<T05>();
+                client.As<T06>();
+                client.As<T07>();
+                client.As<T08>();
+                client.As<T09>();
+                client.As<T10>();
+                client.As<T11>();
+                client.As<T12>();
+                client.As<T13>();
+                client.As<T14>();
+                client.As<T15>();
+                client.As<T16>();
+                client.As<T17>();
+                client.As<T18>();
+                client.As<T19>();
+                client.As<T20>();
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/tests/ServiceStack.Redis.Tests/LuaCachedScripts.Async.cs b/tests/ServiceStack.Redis.Tests/LuaCachedScripts.Async.cs
new file mode 100644
index 00000000..4cec7af8
--- /dev/null
+++ b/tests/ServiceStack.Redis.Tests/LuaCachedScripts.Async.cs
@@ -0,0 +1,297 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading.Tasks;
+using NUnit.Framework;
+using ServiceStack.Text;
+
+namespace ServiceStack.Redis.Tests
+{
+    [TestFixture]
+    [Category("Async")]
+    public class LuaCachedScriptsAsync
+    {
+        private const string LuaScript = @"
+local limit = tonumber(ARGV[2])
+local pattern = ARGV[1]
+local cursor = 0
+local len = 0
+local results = {}
+
+repeat
+    local r = redis.call('scan', cursor, 'MATCH', pattern, 'COUNT', limit)
+    cursor = tonumber(r[1])
+    for k,v in ipairs(r[2]) do
+        table.insert(results, v)
+        len = len + 1
+        if len == limit then break end
+    end
+until cursor == 0 or len == limit
+
+return results
+";
+
+        private static async Task AddTestKeysAsync(IRedisClientAsync redis, int count)
+        {
+            for (int i = 0; i < count; i++)
+                await redis.SetValueAsync("key:" + i, "value:" + i);
+        }
+
+        [Test]
+        public async Task Can_call_repeated_scans_in_LUA()
+        {
+            await using var redis = new RedisClient().ForAsyncOnly();
+            await AddTestKeysAsync(redis, 20);
+
+            var r = await redis.ExecLuaAsync(LuaScript, "key:*", "10");
+            Assert.That(r.Children.Count, Is.EqualTo(10));
+
+            r = await redis.ExecLuaAsync(LuaScript, "key:*", "40");
+            Assert.That(r.Children.Count, Is.EqualTo(20));
+        }
+
+        [Test]
+        public async Task Can_call_Cached_Lua()
+        {
+            await using var redis = new RedisClient().ForAsyncOnly();
+            await AddTestKeysAsync(redis, 20);
+
+            var r = await redis.ExecCachedLuaAsync(LuaScript, sha1 =>
+                redis.ExecLuaShaAsync(sha1, "key:*", "10"));
+            Assert.That(r.Children.Count, Is.EqualTo(10));
+
+            r = await redis.ExecCachedLuaAsync(LuaScript, sha1 =>
+                redis.ExecLuaShaAsync(sha1, "key:*", "10"));
+            Assert.That(r.Children.Count, Is.EqualTo(10));
+        }
+
+        [Test]
+        public async Task Can_call_Cached_Lua_even_after_script_is_flushed()
+        {
+            await using var redis = new RedisClient().ForAsyncOnly();
+            await AddTestKeysAsync(redis, 20);
+
+            var r = await redis.ExecCachedLuaAsync(LuaScript, sha1 =>
+                redis.ExecLuaShaAsync(sha1, "key:*", "10"));
+
Assert.That(r.Children.Count, Is.EqualTo(10)); + + await ((IRedisNativeClientAsync)redis).ScriptFlushAsync(); + + r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, "key:*", "10")); + Assert.That(r.Children.Count, Is.EqualTo(10)); + } + + [Test] + public async Task Can_call_repeated_scans_in_LUA_longhand() + { + await using var redis = new RedisClient().ForAsyncOnly(); + + await AddTestKeysAsync(redis, 20); + + var r = await redis.ExecLuaAsync(LuaScript, null, new[] { "key:*", "10" }); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + r = await redis.ExecLuaAsync(LuaScript, null, new[] { "key:*", "40" }); + Assert.That(r.Children.Count, Is.EqualTo(20)); + } + + [Test] + public async Task Can_call_Cached_Lua_longhand() + { + await using var redis = new RedisClient().ForAsyncOnly(); + await AddTestKeysAsync(redis, 20); + + var r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, null, new[] { "key:*", "10" })); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, null, new[] { "key:*", "10" })); + Assert.That(r.Children.Count, Is.EqualTo(10)); + } + + [Test] + public async Task Can_call_Cached_Lua_even_after_script_is_flushed_longhand() + { + await using var redis = new RedisClient().ForAsyncOnly(); + await AddTestKeysAsync(redis, 20); + + var r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, null, new[] { "key:*", "10" })); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + await ((IRedisNativeClientAsync)redis).ScriptFlushAsync(); + + r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, null, new[] { "key:*", "10" })); + Assert.That(r.Children.Count, Is.EqualTo(10)); + } + + private const string KeyAttributesScript = @" +local limit = tonumber(ARGV[2]) +local pattern = ARGV[1] +local cursor = 0 +local len = 0 +local keys = {} + +repeat + local r = redis.call('scan', cursor, 'MATCH', pattern, 'COUNT', limit) + cursor = tonumber(r[1]) + for k,v in ipairs(r[2]) do + table.insert(keys, v) + len = len + 1 + if len == limit then break end + end +until cursor == 0 or len == limit + +local keyAttrs = {} +for i,key in ipairs(keys) do + local type = redis.call('type', key)['ok'] + local pttl = redis.call('pttl', key) + local size = 0 + if type == 'string' then + size = redis.call('strlen', key) + elseif type == 'list' then + size = redis.call('llen', key) + elseif type == 'set' then + size = redis.call('scard', key) + elseif type == 'zset' then + size = redis.call('zcard', key) + elseif type == 'hash' then + size = redis.call('hlen', key) + end + + local attrs = {['id'] = key, ['type'] = type, ['ttl'] = pttl, ['size'] = size} + + table.insert(keyAttrs, attrs) +end + +return cjson.encode(keyAttrs)"; + + [Test] + public async Task Can_call_script_with_complex_response() + { + await using var redis = new RedisClient().ForAsyncOnly(); + var r = await redis.ExecCachedLuaAsync(KeyAttributesScript, sha1 => + redis.ExecLuaShaAsStringAsync(sha1, "key:*", "10")); + + r.Print(); + + var results = r.FromJson>(); + + Assert.That(results.Count, Is.EqualTo(10)); + + var result = results[0]; + Assert.That(result.Id.StartsWith("key:")); + Assert.That(result.Type, Is.EqualTo("string")); + Assert.That(result.Size, Is.GreaterThan("value:".Length)); + Assert.That(result.Ttl, Is.EqualTo(-1)); + } + + [Test] + public async Task Can_call_script_with_complex_response_longhand() + { + await using var redis = 
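// Illustrative aside (editor's sketch, not part of the patch): ExecCachedLuaAsync wraps the
// EVALSHA pattern these *Cached_Lua* tests exercise - the script's SHA1 is handed to the
// callback, the callback runs it via ExecLuaShaAsync, and when the server no longer has the
// script cached (e.g. after SCRIPT FLUSH) the full script is re-sent and the call retried.
// Minimal usage sketch; RedisText.Text holding the reply is an assumption, the rest uses only
// APIs shown in these tests:
public static async Task CachedLuaSketch(IRedisClientAsync redis)
{
    const string script = "return ARGV[1]";

    // First call loads the script server-side; subsequent calls only transmit its SHA1.
    var result = await redis.ExecCachedLuaAsync(script, sha1 =>
        redis.ExecLuaShaAsync(sha1, "hello"));

    Console.WriteLine(result.Text); // expected reply: "hello"
}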
new RedisClient().ForAsyncOnly(); + var r = await redis.ExecCachedLuaAsync(KeyAttributesScript, sha1 => + redis.ExecLuaShaAsStringAsync(sha1, null, new[] { "key:*", "10" })); + + r.Print(); + + var results = r.FromJson>(); + + Assert.That(results.Count, Is.EqualTo(10)); + + var result = results[0]; + Assert.That(result.Id.StartsWith("key:")); + Assert.That(result.Type, Is.EqualTo("string")); + Assert.That(result.Size, Is.GreaterThan("value:".Length)); + Assert.That(result.Ttl, Is.EqualTo(-1)); + } + + public class SearchResult + { + public string Id { get; set; } + public string Type { get; set; } + public long Ttl { get; set; } + public long Size { get; set; } + } + + [Test] + public async Task Can_merge_multiple_SearchResults() + { + await using var Redis = new RedisClient().ForAsyncOnly(); + var limit = 10; + var query = "key:*"; + + List keys = new List(limit); + await foreach (var key in Redis.ScanAllKeysAsync(pattern: query, pageSize: limit)) + { + keys.Add(key); + if (keys.Count == limit) break; + } + + var keyTypes = new Dictionary(); + var keyTtls = new Dictionary(); + var keySizes = new Dictionary(); + + if (keys.Count > 0) + { + await using (var pipeline = Redis.CreatePipeline()) + { + foreach (var key in keys) + pipeline.QueueCommand(r => r.TypeAsync(key), x => keyTypes[key] = x); + + foreach (var key in keys) + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).PTtlAsync(key), x => keyTtls[key] = x); + + await pipeline.FlushAsync(); + } + + await using (var pipeline = Redis.CreatePipeline()) + { + foreach (var entry in keyTypes) + { + var key = entry.Key; + switch (entry.Value) + { + case "string": + pipeline.QueueCommand(r => r.GetStringCountAsync(key), x => keySizes[key] = x); + break; + case "list": + pipeline.QueueCommand(r => r.GetListCountAsync(key), x => keySizes[key] = x); + break; + case "set": + pipeline.QueueCommand(r => r.GetSetCountAsync(key), x => keySizes[key] = x); + break; + case "zset": + pipeline.QueueCommand(r => r.GetSortedSetCountAsync(key), x => keySizes[key] = x); + break; + case "hash": + pipeline.QueueCommand(r => r.GetHashCountAsync(key), x => keySizes[key] = x); + break; + } + } + + await pipeline.FlushAsync(); + } + } + + var results = keys.Map(x => new SearchResult + { + Id = x, + Type = keyTypes.GetValueOrDefault(x), + Ttl = keyTtls.GetValueOrDefault(x), + Size = keySizes.GetValueOrDefault(x), + }); + + Assert.That(results.Count, Is.EqualTo(limit)); + + var result = results[0]; + Assert.That(result.Id.StartsWith("key:")); + Assert.That(result.Type, Is.EqualTo("string")); + Assert.That(result.Size, Is.GreaterThan("value:".Length)); + Assert.That(result.Ttl, Is.EqualTo(-1)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/LuaCachedScripts.cs b/tests/ServiceStack.Redis.Tests/LuaCachedScripts.cs new file mode 100644 index 00000000..80491fec --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/LuaCachedScripts.cs @@ -0,0 +1,232 @@ +using System.Collections.Generic; +using System.Linq; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class LuaCachedScripts + { + private const string LuaScript = @" +local limit = tonumber(ARGV[2]) +local pattern = ARGV[1] +local cursor = 0 +local len = 0 +local results = {} + +repeat + local r = redis.call('scan', cursor, 'MATCH', pattern, 'COUNT', limit) + cursor = tonumber(r[1]) + for k,v in ipairs(r[2]) do + table.insert(results, v) + len = len + 1 + if len == limit then break end + end +until cursor == 0 or len 
== limit + +return results +"; + + private static void AddTestKeys(RedisClient redis, int count) + { + count.Times(i => + redis.SetValue("key:" + i, "value:" + i)); + } + + [Test] + public void Can_call_repeated_scans_in_LUA() + { + using (var redis = new RedisClient()) + { + AddTestKeys(redis, 20); + + var r = redis.ExecLua(LuaScript, "key:*", "10"); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + r = redis.ExecLua(LuaScript, "key:*", "40"); + Assert.That(r.Children.Count, Is.EqualTo(20)); + } + } + + [Test] + public void Can_call_Cached_Lua() + { + using (var redis = new RedisClient()) + { + AddTestKeys(redis, 20); + + var r = redis.ExecCachedLua(LuaScript, sha1 => + redis.ExecLuaSha(sha1, "key:*", "10")); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + r = redis.ExecCachedLua(LuaScript, sha1 => + redis.ExecLuaSha(sha1, "key:*", "10")); + Assert.That(r.Children.Count, Is.EqualTo(10)); + } + } + + [Test] + public void Can_call_Cached_Lua_even_after_script_is_flushed() + { + using (var redis = new RedisClient()) + { + AddTestKeys(redis, 20); + + var r = redis.ExecCachedLua(LuaScript, sha1 => + redis.ExecLuaSha(sha1, "key:*", "10")); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + redis.ScriptFlush(); + + r = redis.ExecCachedLua(LuaScript, sha1 => + redis.ExecLuaSha(sha1, "key:*", "10")); + Assert.That(r.Children.Count, Is.EqualTo(10)); + } + } + + private const string KeyAttributesScript = @" +local limit = tonumber(ARGV[2]) +local pattern = ARGV[1] +local cursor = 0 +local len = 0 +local keys = {} + +repeat + local r = redis.call('scan', cursor, 'MATCH', pattern, 'COUNT', limit) + cursor = tonumber(r[1]) + for k,v in ipairs(r[2]) do + table.insert(keys, v) + len = len + 1 + if len == limit then break end + end +until cursor == 0 or len == limit + +local keyAttrs = {} +for i,key in ipairs(keys) do + local type = redis.call('type', key)['ok'] + local pttl = redis.call('pttl', key) + local size = 0 + if type == 'string' then + size = redis.call('strlen', key) + elseif type == 'list' then + size = redis.call('llen', key) + elseif type == 'set' then + size = redis.call('scard', key) + elseif type == 'zset' then + size = redis.call('zcard', key) + elseif type == 'hash' then + size = redis.call('hlen', key) + end + + local attrs = {['id'] = key, ['type'] = type, ['ttl'] = pttl, ['size'] = size} + + table.insert(keyAttrs, attrs) +end + +return cjson.encode(keyAttrs)"; + + [Test] + public void Can_call_script_with_complex_response() + { + using (var redis = new RedisClient()) + { + var r = redis.ExecCachedLua(KeyAttributesScript, sha1 => + redis.ExecLuaShaAsString(sha1, "key:*", "10")); + + r.Print(); + + var results = r.FromJson>(); + + Assert.That(results.Count, Is.EqualTo(10)); + + var result = results[0]; + Assert.That(result.Id.StartsWith("key:")); + Assert.That(result.Type, Is.EqualTo("string")); + Assert.That(result.Size, Is.GreaterThan("value:".Length)); + Assert.That(result.Ttl, Is.EqualTo(-1)); + } + } + + public class SearchResult + { + public string Id { get; set; } + public string Type { get; set; } + public long Ttl { get; set; } + public long Size { get; set; } + } + + [Test] + public void Can_merge_multiple_SearchResults() + { + var Redis = new RedisClient(); + var limit = 10; + var query = "key:*"; + + var keys = Redis.ScanAllKeys(pattern: query, pageSize: limit) + .Take(limit).ToList(); + + var keyTypes = new Dictionary(); + var keyTtls = new Dictionary(); + var keySizes = new Dictionary(); + + if (keys.Count > 0) + { + using (var pipeline = Redis.CreatePipeline()) 
+ { + keys.Each(key => + pipeline.QueueCommand(r => r.Type(key), x => keyTypes[key] = x)); + + keys.Each(key => + pipeline.QueueCommand(r => ((RedisNativeClient)r).PTtl(key), x => keyTtls[key] = x)); + + pipeline.Flush(); + } + + using (var pipeline = Redis.CreatePipeline()) + { + foreach (var entry in keyTypes) + { + var key = entry.Key; + switch (entry.Value) + { + case "string": + pipeline.QueueCommand(r => r.GetStringCount(key), x => keySizes[key] = x); + break; + case "list": + pipeline.QueueCommand(r => r.GetListCount(key), x => keySizes[key] = x); + break; + case "set": + pipeline.QueueCommand(r => r.GetSetCount(key), x => keySizes[key] = x); + break; + case "zset": + pipeline.QueueCommand(r => r.GetSortedSetCount(key), x => keySizes[key] = x); + break; + case "hash": + pipeline.QueueCommand(r => r.GetHashCount(key), x => keySizes[key] = x); + break; + } + } + + pipeline.Flush(); + } + } + + var results = keys.Map(x => new SearchResult + { + Id = x, + Type = keyTypes.GetValueOrDefault(x), + Ttl = keyTtls.GetValueOrDefault(x), + Size = keySizes.GetValueOrDefault(x), + }); + + Assert.That(results.Count, Is.EqualTo(limit)); + + var result = results[0]; + Assert.That(result.Id.StartsWith("key:")); + Assert.That(result.Type, Is.EqualTo("string")); + Assert.That(result.Size, Is.GreaterThan("value:".Length)); + Assert.That(result.Ttl, Is.EqualTo(-1)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ManagedListGenericTests.cs b/tests/ServiceStack.Redis.Tests/ManagedListGenericTests.cs index 3386dd7f..1da877b8 100644 --- a/tests/ServiceStack.Redis.Tests/ManagedListGenericTests.cs +++ b/tests/ServiceStack.Redis.Tests/ManagedListGenericTests.cs @@ -15,7 +15,7 @@ public class ManagedListGenericTests public void TestSetUp() { if (redisManager != null) redisManager.Dispose(); - redisManager = new BasicRedisClientManager(TestConfig.SingleHost); + redisManager = TestConfig.BasicClientManger; redisManager.Exec(r => r.FlushAll()); } diff --git a/tests/ServiceStack.Redis.Tests/Messaging/InspectingMqTests.cs b/tests/ServiceStack.Redis.Tests/Messaging/InspectingMqTests.cs deleted file mode 100644 index 10a8aebf..00000000 --- a/tests/ServiceStack.Redis.Tests/Messaging/InspectingMqTests.cs +++ /dev/null @@ -1,91 +0,0 @@ -using System; -using System.Collections.Generic; -using NUnit.Framework; -using ServiceStack.Text; -using ServiceStack.Messaging; -using ServiceStack.Messaging.Tests; -using ServiceStack.Redis.Messaging; - -namespace ServiceStack.Redis.Tests -{ - public class MessageType1 - { - public string Name { get; set; } - } - - public class MessageType2 - { - public string Name { get; set; } - } - - public class MessageType3 - { - public string Name { get; set; } - } - - public class MessageStat - { - public string MqName { get; set; } - public string MqType { get; set; } - public string MessageType { get; set; } - public int Count { get; set; } - } - - [TestFixture] - public class InspectingMqTests - { - IMessageService mqService; - IRedisClientsManager redisManager; - - [TestFixtureSetUp] - public void TestFixtureSetUp() - { - redisManager = new BasicRedisClientManager(); - mqService = new RedisMqHost(redisManager, 2, null); - - redisManager.Exec(r => r.FlushAll()); - - using (var mqPublisher = mqService.MessageFactory.CreateMessageProducer()) - { - var i=0; - mqPublisher.Publish(new MessageType1 { Name = "msg-" + i++ }); - mqPublisher.Publish(new MessageType2 { Name = "msg-" + i++ }); - mqPublisher.Publish(new MessageType2 { Name = "msg-" + i++ }); - 
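// Illustrative aside (editor's sketch, not part of the patch): the Can_merge_multiple_SearchResults
// test above queues many commands on one pipeline and captures each reply in a callback before a
// single Flush sends everything in one round-trip. A standalone sketch of that pattern; GetValue
// is assumed to be IRedisClient's plain string getter, the pipeline API mirrors the test above:
public static Dictionary<string, string> PipelinedGets(IRedisClient redis, IEnumerable<string> keys)
{
    var results = new Dictionary<string, string>();
    using (var pipeline = redis.CreatePipeline())
    {
        foreach (var key in keys)
            pipeline.QueueCommand(r => r.GetValue(key), x => results[key] = x);

        pipeline.Flush(); // all queued commands are sent and callbacks invoked here
    }
    return results;
}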
mqPublisher.Publish(new MessageType3 { Name = "msg-" + i++ }); - mqPublisher.Publish(new MessageType3 { Name = "msg-" + i++ }); - mqPublisher.Publish(new MessageType3 { Name = "msg-" + i++ }); - } - } - - [TestFixtureTearDown] - public void TestFixtureTearDown() - { - mqService.Dispose(); - redisManager.Dispose(); - } - - [Test] - public void Can_get_RedisMq_stats() - { - var redisMqStats = new List(); - using (var redis = redisManager.GetClient()) - { - var keys = redis.SearchKeys("mq:*"); - foreach (var key in keys) - { - if (redis.GetEntryType(key) != RedisKeyType.List) continue; - - - var stat = new MessageStat { - MqName = key, - }; - - redisMqStats.Add(stat); - } - } - - redisMqStats.PrintDump(); - } - } -} - diff --git a/tests/ServiceStack.Redis.Tests/Messaging/RedisTransientMessagingHostTests.cs b/tests/ServiceStack.Redis.Tests/Messaging/RedisTransientMessagingHostTests.cs deleted file mode 100644 index ecdba7bc..00000000 --- a/tests/ServiceStack.Redis.Tests/Messaging/RedisTransientMessagingHostTests.cs +++ /dev/null @@ -1,55 +0,0 @@ -using NUnit.Framework; -using ServiceStack.Messaging; -using ServiceStack.Messaging.Tests; -using ServiceStack.Redis.Messaging; - -namespace ServiceStack.Redis.Tests.Messaging -{ - [Category("Integration")] - public class RedisTransientMessagingHostTests - : TransientServiceMessagingTests - { - private IRedisClientsManager clientManager; - private RedisTransientMessageFactory factory; - - public override void OnBeforeEachTest() - { - ResetConnections(); - - using (var client = clientManager.GetClient()) - { - client.FlushAll(); - } - - base.OnBeforeEachTest(); - } - - protected override IMessageFactory CreateMessageFactory() - { - return factory; - } - - protected override TransientMessageServiceBase CreateMessagingService() - { - return factory.MessageService; - } - - private void ResetConnections() - { - if (clientManager != null) - { - clientManager.Dispose(); - clientManager = null; - } - - if (factory != null) - { - factory.Dispose(); - factory = null; - } - - clientManager = new BasicRedisClientManager(TestConfig.MasterHosts); - factory = new RedisTransientMessageFactory(clientManager); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/NetCoreTestsRunner.cs b/tests/ServiceStack.Redis.Tests/NetCoreTestsRunner.cs new file mode 100644 index 00000000..24012602 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/NetCoreTestsRunner.cs @@ -0,0 +1,39 @@ +//NUnitLite isn't recognized in VS2017 - shouldn't need NUnitLite with NUnit 3.5+ https://github.com/nunit/dotnet-test-nunit +#if NUNITLITE +using NUnitLite; +using NUnit.Common; +using System.Reflection; +using ServiceStack; +using ServiceStack.Text; +using System; +using System.Globalization; +using System.Threading; + +namespace NUnitLite.Tests +{ + public class NetCoreTestsRunner + { + /// + /// The main program executes the tests. Output may be routed to + /// various locations, depending on the arguments passed. 
+ /// + /// Run with --help for a full list of arguments supported + /// + public static int Main(string[] args) + { + var licenseKey = Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE"); + if (licenseKey.IsNullOrEmpty()) + throw new ArgumentNullException("SERVICESTACK_LICENSE", "Add Environment variable for SERVICESTACK_LICENSE"); + + Licensing.RegisterLicense(licenseKey); + //"ActivatedLicenseFeatures: ".Print(LicenseUtils.ActivatedLicenseFeatures()); + + CultureInfo.DefaultThreadCurrentCulture = new CultureInfo("en-US"); + JsConfig.InitStatics(); + //JsonServiceClient client = new JsonServiceClient(); + var writer = new ExtendedTextWrapper(Console.Out); + return new AutoRun(((IReflectableType)typeof(NetCoreTestsRunner)).GetTypeInfo().Assembly).Execute(args, writer, Console.In); + } + } +} +#endif \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/NorthwindPerfTests.cs b/tests/ServiceStack.Redis.Tests/NorthwindPerfTests.cs deleted file mode 100644 index 96769066..00000000 --- a/tests/ServiceStack.Redis.Tests/NorthwindPerfTests.cs +++ /dev/null @@ -1,45 +0,0 @@ -using System; -using System.Diagnostics; -using Northwind.Common.DataModel; -using NUnit.Framework; -using ServiceStack.DataAccess; - -namespace ServiceStack.Redis.Tests -{ - [TestFixture] - public class NorthwindPerfTests - { - [Test][Ignore("Hangs")] - public void Load_Northwind_database_with_redis() - { - NorthwindData.LoadData(false); - GC.Collect(); - - var stopWatch = new Stopwatch(); - stopWatch.Start(); - - using (var client = new RedisClient(TestConfig.SingleHost)) - { - LoadNorthwindData(client); - } - - Debug.WriteLine("stopWatch.ElapsedMilliseconds: " + stopWatch.ElapsedMilliseconds); - } - - private static void LoadNorthwindData(IBasicPersistenceProvider persistenceProvider) - { - persistenceProvider.StoreAll(NorthwindData.Categories); - persistenceProvider.StoreAll(NorthwindData.Customers); - persistenceProvider.StoreAll(NorthwindData.Employees); - persistenceProvider.StoreAll(NorthwindData.Shippers); - persistenceProvider.StoreAll(NorthwindData.Orders); - persistenceProvider.StoreAll(NorthwindData.Products); - persistenceProvider.StoreAll(NorthwindData.OrderDetails); - persistenceProvider.StoreAll(NorthwindData.CustomerCustomerDemos); - persistenceProvider.StoreAll(NorthwindData.Regions); - persistenceProvider.StoreAll(NorthwindData.Territories); - persistenceProvider.StoreAll(NorthwindData.EmployeeTerritories); - } - } - -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ObjectSerializerTests.cs b/tests/ServiceStack.Redis.Tests/ObjectSerializerTests.cs index 5ad26d34..6edc5271 100644 --- a/tests/ServiceStack.Redis.Tests/ObjectSerializerTests.cs +++ b/tests/ServiceStack.Redis.Tests/ObjectSerializerTests.cs @@ -3,17 +3,20 @@ namespace ServiceStack.Redis.Tests { - [TestFixture] - public class ObjectSerializerTests - { - [Test] - public void Can_serialize_object_with_default_serializer() - { - var ser = new ObjectSerializer(); - string test = "test"; - var serialized = ser.Serialize(test); - Assert.AreEqual(test, ser.Deserialize(serialized)); - } + [TestFixture] +#if NETCORE + [Ignore(".NET Core does not implement BinaryFormatter required for these tests")] +#endif + public class ObjectSerializerTests + { + [Test] + public void Can_serialize_object_with_default_serializer() + { + var ser = new ObjectSerializer(); + string test = "test"; + var serialized = ser.Serialize(test); + Assert.AreEqual(test, ser.Deserialize(serialized)); + } [Test] public void 
Can_serialize_object_with_optimized_serializer() { @@ -26,6 +29,6 @@ public void Can_serialize_object_with_optimized_serializer() serialized = ser.Serialize(testFloat); Assert.AreEqual(testFloat, ser.Deserialize(serialized)); } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.Async.cs b/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.Async.cs new file mode 100644 index 00000000..fdf442f6 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.Async.cs @@ -0,0 +1,440 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration"), Category("Async")] + public class PooledRedisClientManagerTestsAsync + { + [OneTimeSetUp] + public void OneTimeSetUp() + { + RedisConfig.VerifyMasterConnections = false; + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { + RedisConfig.VerifyMasterConnections = true; + } + + readonly string[] testReadWriteHosts = new[] { + "readwrite1", "readwrite2:6000", "192.168.0.1", "localhost" + }; + + readonly string[] testReadOnlyHosts = new[] { + "read1", "read2:7000", "127.0.0.1" + }; + + private string firstReadWriteHost; + private string firstReadOnlyHost; + + [SetUp] + public void OnBeforeEachTest() + { + firstReadWriteHost = testReadWriteHosts[0]; + firstReadOnlyHost = testReadOnlyHosts[0]; + } + + public IRedisClientsManagerAsync CreateManager(string[] readWriteHosts, string[] readOnlyHosts, int? defaultDb = null) + { + return new PooledRedisClientManager(readWriteHosts, readOnlyHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = readWriteHosts.Length, + MaxReadPoolSize = readOnlyHosts.Length, + AutoStart = false, + DefaultDb = defaultDb + }); + } + public IRedisClientsManagerAsync CreateManager(params string[] readWriteHosts) + { + return CreateManager(readWriteHosts, readWriteHosts); + } + + public IRedisClientsManagerAsync CreateManager() + { + return CreateManager(testReadWriteHosts, testReadOnlyHosts); + } + + public IRedisClientsManagerAsync CreateAndStartManager() + { + var manager = CreateManager(); + ((PooledRedisClientManager)manager).Start(); + return manager; + } + + [Test] + public async Task Cant_get_client_without_calling_Start() + { + await using var manager = CreateManager(); + try + { + var client = await manager.GetClientAsync(); + } + catch (InvalidOperationException) + { + return; + } + Assert.Fail("Should throw"); + } + + [Test] + public async Task Can_change_db_for_client_PooledRedisClientManager() + { + await using IRedisClientsManagerAsync db1 = new PooledRedisClientManager(1, new string[] { TestConfig.SingleHost }); + await using IRedisClientsManagerAsync db2 = new PooledRedisClientManager(2, new string[] { TestConfig.SingleHost }); + var val = Environment.TickCount; + var key = "test" + val; + var db1c = await db1.GetClientAsync(); + var db2c = await db2.GetClientAsync(); + try + { + await db1c.SetAsync(key, val); + Assert.That(await db2c.GetAsync(key), Is.EqualTo(0)); + Assert.That(await db1c.GetAsync(key), Is.EqualTo(val)); + } + finally + { + await db1c.RemoveAsync(key); + } + } + + [Test] + public async Task Can_change_db_for_client_RedisManagerPool() + { + await using IRedisClientsManagerAsync db1 = new RedisManagerPool(TestConfig.SingleHost + "?db=1"); + 
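// Illustrative aside (editor's sketch, not part of the patch): the Can_change_db_for_client_*
// tests select a logical Redis database either through a manager constructor argument
// (e.g. new PooledRedisClientManager(1, hosts)) or through a "?db=" connection-string option.
// Minimal sketch of the connection-string form; GetAsync<string> is assumed to be the typed
// getter paired with the SetAsync call shown in these tests:
public static async Task DbSelectionSketch(string host)
{
    await using IRedisClientsManagerAsync manager = new RedisManagerPool(host + "?db=3");
    await using var client = await manager.GetClientAsync();

    await client.SetAsync("greeting", "hello"); // stored in logical db 3
    var value = await client.GetAsync<string>("greeting");
}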
await using IRedisClientsManagerAsync db2 = new RedisManagerPool(TestConfig.SingleHost + "?db=2"); + var val = Environment.TickCount; + var key = "test" + val; + var db1c = await db1.GetClientAsync(); + var db2c = await db2.GetClientAsync(); + try + { + await db1c.SetAsync(key, val); + Assert.That(await db2c.GetAsync(key), Is.EqualTo(0)); + Assert.That(await db1c.GetAsync(key), Is.EqualTo(val)); + } + finally + { + await db1c.RemoveAsync(key); + } + } + + [Test] + public async Task Can_change_db_for_client_BasicRedisClientManager() + { + await using IRedisClientsManagerAsync db1 = new BasicRedisClientManager(1, new string[] { TestConfig.SingleHost }); + await using IRedisClientsManagerAsync db2 = new BasicRedisClientManager(2, new string[] { TestConfig.SingleHost }); + var val = Environment.TickCount; + var key = "test" + val; + var db1c = await db1.GetClientAsync(); + var db2c = await db2.GetClientAsync(); + try + { + await db1c.SetAsync(key, val); + Assert.That(await db2c.GetAsync(key), Is.EqualTo(0)); + Assert.That(await db1c.GetAsync(key), Is.EqualTo(val)); + } + finally + { + await db1c.RemoveAsync(key); + } + } + + [Test] + public async Task Can_get_client_after_calling_Start() + { + await using var manager = CreateAndStartManager(); + var client = await manager.GetClientAsync(); + } + + [Test] + public async Task Can_get_ReadWrite_client() + { + await using var manager = CreateAndStartManager(); + var client = await manager.GetClientAsync(); + + AssertClientHasHost(client, firstReadWriteHost); + } + + private static void AssertClientHasHost(IRedisClientAsync client, string hostWithOptionalPort) + { + var parts = hostWithOptionalPort.Split(':'); + var port = parts.Length > 1 ? int.Parse(parts[1]) : RedisConfig.DefaultPort; + + Assert.That(client.Host, Is.EqualTo(parts[0])); + Assert.That(client.Port, Is.EqualTo(port)); + } + + [Test] + public async Task Can_get_ReadOnly_client() + { + await using var manager = CreateAndStartManager(); + var client = await manager.GetReadOnlyClientAsync(); + + AssertClientHasHost(client, firstReadOnlyHost); + } + + [Test] + public async Task Does_loop_through_ReadWrite_hosts() + { + await using var manager = CreateAndStartManager(); + var client1 = await manager.GetClientAsync(); + await client1.DisposeAsync(); + var client2 = await manager.GetClientAsync(); + var client3 = await manager.GetClientAsync(); + var client4 = await manager.GetClientAsync(); + var client5 = await manager.GetClientAsync(); + + AssertClientHasHost(client1, testReadWriteHosts[0]); + AssertClientHasHost(client2, testReadWriteHosts[1]); + AssertClientHasHost(client3, testReadWriteHosts[2]); + AssertClientHasHost(client4, testReadWriteHosts[3]); + AssertClientHasHost(client5, testReadWriteHosts[0]); + } + + [Test] + public async Task Does_loop_through_ReadOnly_hosts() + { + await using var manager = CreateAndStartManager(); + var client1 = await manager.GetReadOnlyClientAsync(); + await client1.DisposeAsync(); + var client2 = await manager.GetReadOnlyClientAsync(); + await client2.DisposeAsync(); + var client3 = await manager.GetReadOnlyClientAsync(); + var client4 = await manager.GetReadOnlyClientAsync(); + var client5 = await manager.GetReadOnlyClientAsync(); + + AssertClientHasHost(client1, testReadOnlyHosts[0]); + AssertClientHasHost(client2, testReadOnlyHosts[1]); + AssertClientHasHost(client3, testReadOnlyHosts[2]); + AssertClientHasHost(client4, testReadOnlyHosts[0]); + AssertClientHasHost(client5, testReadOnlyHosts[1]); + } + + [Test] + public async Task 
Can_have_different_pool_size_and_host_configurations() + { + var writeHosts = new[] { "readwrite1" }; + var readHosts = new[] { "read1", "read2" }; + + const int poolSizeMultiplier = 4; + + await using IRedisClientsManagerAsync manager = new PooledRedisClientManager(writeHosts, readHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = writeHosts.Length * poolSizeMultiplier, + MaxReadPoolSize = readHosts.Length * poolSizeMultiplier, + AutoStart = true, + } + ); + //A poolsize of 4 will not block getting 4 clients + await using (var client1 = await manager.GetClientAsync()) + await using (var client2 = await manager.GetClientAsync()) + await using (var client3 = await manager.GetClientAsync()) + await using (var client4 = await manager.GetClientAsync()) + { + AssertClientHasHost(client1, writeHosts[0]); + AssertClientHasHost(client2, writeHosts[0]); + AssertClientHasHost(client3, writeHosts[0]); + AssertClientHasHost(client4, writeHosts[0]); + } + + //A poolsize of 8 will not block getting 8 clients + await using (var client1 = await manager.GetReadOnlyClientAsync()) + await using (var client2 = await manager.GetReadOnlyClientAsync()) + await using (var client3 = await manager.GetReadOnlyClientAsync()) + await using (var client4 = await manager.GetReadOnlyClientAsync()) + await using (var client5 = await manager.GetReadOnlyClientAsync()) + await using (var client6 = await manager.GetReadOnlyClientAsync()) + await using (var client7 = await manager.GetReadOnlyClientAsync()) + await using (var client8 = await manager.GetReadOnlyClientAsync()) + { + AssertClientHasHost(client1, readHosts[0]); + AssertClientHasHost(client2, readHosts[1]); + AssertClientHasHost(client3, readHosts[0]); + AssertClientHasHost(client4, readHosts[1]); + AssertClientHasHost(client5, readHosts[0]); + AssertClientHasHost(client6, readHosts[1]); + AssertClientHasHost(client7, readHosts[0]); + AssertClientHasHost(client8, readHosts[1]); + } + } + + [Test] + public async Task Does_block_ReadWrite_clients_pool() + { + await using IRedisClientsManagerAsync manager = CreateAndStartManager(); + var delay = TimeSpan.FromSeconds(1); + var client1 = await manager.GetClientAsync(); + var client2 = await manager.GetClientAsync(); + var client3 = await manager.GetClientAsync(); + var client4 = await manager.GetClientAsync(); + +#pragma warning disable IDE0039 // Use local function + Action func = async delegate +#pragma warning restore IDE0039 // Use local function + { + await Task.Delay(delay + TimeSpan.FromSeconds(0.5)); + await client4.DisposeAsync(); + }; + +#if NETCORE + _ = Task.Run(func); +#else + func.BeginInvoke(null, null); +#endif + + var start = DateTime.Now; + + var client5 = await manager.GetClientAsync(); + + Assert.That(DateTime.Now - start, Is.GreaterThanOrEqualTo(delay)); + + AssertClientHasHost(client1, testReadWriteHosts[0]); + AssertClientHasHost(client2, testReadWriteHosts[1]); + AssertClientHasHost(client3, testReadWriteHosts[2]); + AssertClientHasHost(client4, testReadWriteHosts[3]); + AssertClientHasHost(client5, testReadWriteHosts[3]); + } + + [Test] + public async Task Does_block_ReadOnly_clients_pool() + { + var delay = TimeSpan.FromSeconds(1); + + await using var manager = CreateAndStartManager(); + var client1 = await manager.GetReadOnlyClientAsync(); + var client2 = await manager.GetReadOnlyClientAsync(); + var client3 = await manager.GetReadOnlyClientAsync(); + +#pragma warning disable IDE0039 // Use local function + Action func = async delegate +#pragma warning restore IDE0039 // Use local 
function + { + await Task.Delay(delay + TimeSpan.FromSeconds(0.5)); + await client3.DisposeAsync(); + }; +#if NETCORE + _ =Task.Run(func); +#else + func.BeginInvoke(null, null); +#endif + var start = DateTime.Now; + + var client4 = await manager.GetReadOnlyClientAsync(); + + Assert.That(DateTime.Now - start, Is.GreaterThanOrEqualTo(delay)); + + AssertClientHasHost(client1, testReadOnlyHosts[0]); + AssertClientHasHost(client2, testReadOnlyHosts[1]); + AssertClientHasHost(client3, testReadOnlyHosts[2]); + AssertClientHasHost(client4, testReadOnlyHosts[2]); + } + + [Test] + public async Task Does_throw_TimeoutException_when_PoolTimeout_exceeded() + { + await using IRedisClientsManagerAsync manager = new PooledRedisClientManager(testReadWriteHosts, testReadOnlyHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = 4, + MaxReadPoolSize = 4, + AutoStart = false, + }); + ((PooledRedisClientManager)manager).PoolTimeout = 100; + + ((PooledRedisClientManager)manager).Start(); + + var masters = 4.Times(i => manager.GetClientAsync()); + + try + { + await manager.GetClientAsync(); + Assert.Fail("Should throw TimeoutException"); + } + catch (TimeoutException ex) + { + Assert.That(ex.Message, Does.StartWith("Redis Timeout expired.")); + } + + for (int i = 0; i < 4; i++) + { + await manager.GetReadOnlyClientAsync(); + } + + try + { + await manager.GetReadOnlyClientAsync(); + Assert.Fail("Should throw TimeoutException"); + } + catch (TimeoutException ex) + { + Assert.That(ex.Message, Does.StartWith("Redis Timeout expired.")); + } + } + + //[Ignore("tempromental integration test")] + //[Test] + //public void Can_support_64_threads_using_the_client_simultaneously() + //{ + // const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + // var clientUsageMap = new Dictionary(); + + // var clientAsyncResults = new List(); + // using (var manager = CreateAndStartManager()) + // { + // for (var i = 0; i < noOfConcurrentClients; i++) + // { + // var clientNo = i; + // var action = (Action)(() => UseClient(manager, clientNo, clientUsageMap)); + // clientAsyncResults.Add(action.BeginInvoke(null, null)); + // } + // } + + // WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); + + // RedisStats.ToDictionary().PrintDump(); + + // Debug.WriteLine(TypeSerializer.SerializeToString(clientUsageMap)); + + // var hostCount = 0; + // foreach (var entry in clientUsageMap) + // { + // Assert.That(entry.Value, Is.GreaterThanOrEqualTo(2), "Host has unproportionate distribution: " + entry.Value); + // Assert.That(entry.Value, Is.LessThanOrEqualTo(30), "Host has unproportionate distribution: " + entry.Value); + // hostCount += entry.Value; + // } + + // Assert.That(hostCount, Is.EqualTo(noOfConcurrentClients), "Invalid no of clients used"); + //} + + //private static void UseClient(IRedisClientsManager manager, int clientNo, Dictionary hostCountMap) + //{ + // using (var client = manager.GetClient()) + // { + // lock (hostCountMap) + // { + // int hostCount; + // if (!hostCountMap.TryGetValue(client.Host, out hostCount)) + // { + // hostCount = 0; + // } + + // hostCountMap[client.Host] = ++hostCount; + // } + + // Debug.WriteLine(String.Format("Client '{0}' is using '{1}'", clientNo, client.Host)); + // } + //} + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.cs b/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.cs index ba67cb57..d1a9f7cb 100644 --- 
a/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.cs +++ b/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.cs @@ -1,364 +1,462 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Linq; using System.Threading; -using Moq; using NUnit.Framework; using ServiceStack.Text; +#if NETCORE +using System.Threading.Tasks; +#endif namespace ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class PooledRedisClientManagerTests - { - readonly string[] testReadWriteHosts = new[] { - "readwrite1", "readwrite2:6000", "192.168.0.1", "localhost" - }; - - readonly string[] testReadOnlyHosts = new[] { - "read1", "read2:7000", "127.0.0.1" - }; - - private string firstReadWriteHost; - private string firstReadOnlyHost; - - private Mock mockFactory; - - [SetUp] - public void OnBeforeEachTest() - { - firstReadWriteHost = testReadWriteHosts[0]; - firstReadOnlyHost = testReadOnlyHosts[0]; - - SetupRedisFactoryMock(); - } - - private void SetupRedisFactoryMock() - { - mockFactory = new Mock(); - mockFactory.Expect(x => x.CreateRedisClient( - It.IsAny(), It.IsAny())) - .Returns((Func)((host, port) => new RedisClient(host, port))); - } - - public PooledRedisClientManager CreateManager( - IRedisClientFactory usingFactory, string[] readWriteHosts, string[] readOnlyHosts) - { - return new PooledRedisClientManager(readWriteHosts, readOnlyHosts, - new RedisClientManagerConfig { - MaxWritePoolSize = readWriteHosts.Length, - MaxReadPoolSize = readOnlyHosts.Length, - AutoStart = false, - }) - { - RedisClientFactory = usingFactory, - }; - } - - public PooledRedisClientManager CreateManager( - IRedisClientFactory usingFactory, params string[] readWriteHosts) - { - return new PooledRedisClientManager(readWriteHosts) { - RedisClientFactory = usingFactory, - }; - } - - public PooledRedisClientManager CreateManager(params string[] readWriteHosts) - { - return CreateManager(mockFactory.Object, readWriteHosts, readWriteHosts); - } - - public PooledRedisClientManager CreateManager() - { - return CreateManager(mockFactory.Object, testReadWriteHosts, testReadOnlyHosts); - } - - public PooledRedisClientManager CreateAndStartManager() - { - var manager = CreateManager(); - manager.Start(); - return manager; - } - - [Test] - public void Cant_get_client_without_calling_Start() - { - using (var manager = CreateManager()) - { - try - { - var client = manager.GetClient(); - } - catch (InvalidOperationException) - { - return; - } - Assert.Fail("Should throw"); - } - } - - [Test] - public void Can_get_client_after_calling_Start() - { - using (var manager = CreateManager()) - { - manager.Start(); - var client = manager.GetClient(); - } - } - - [Test] - public void Can_get_ReadWrite_client() - { - using (var manager = CreateAndStartManager()) - { - var client = manager.GetClient(); - - AssertClientHasHost(client, firstReadWriteHost); - - mockFactory.VerifyAll(); - } - } - - private static void AssertClientHasHost(IRedisClient client, string hostWithOptionalPort) - { - var parts = hostWithOptionalPort.Split(':'); - var port = parts.Length > 1 ? 
int.Parse(parts[1]) : RedisNativeClient.DefaultPort; - - Assert.That(client.Host, Is.EqualTo(parts[0])); - Assert.That(client.Port, Is.EqualTo(port)); - } - - [Test] - public void Can_get_ReadOnly_client() - { - using (var manager = CreateAndStartManager()) - { - var client = manager.GetReadOnlyClient(); - - AssertClientHasHost(client, firstReadOnlyHost); - - mockFactory.VerifyAll(); - } - } - - [Test] - public void Does_loop_through_ReadWrite_hosts() - { - using (var manager = CreateAndStartManager()) - { - var client1 = manager.GetClient(); - client1.Dispose(); - var client2 = manager.GetClient(); - var client3 = manager.GetClient(); - var client4 = manager.GetClient(); - var client5 = manager.GetClient(); - - AssertClientHasHost(client1, testReadWriteHosts[0]); - AssertClientHasHost(client2, testReadWriteHosts[1]); - AssertClientHasHost(client3, testReadWriteHosts[2]); - AssertClientHasHost(client4, testReadWriteHosts[3]); - AssertClientHasHost(client5, testReadWriteHosts[0]); - - mockFactory.VerifyAll(); - } - } - - [Test] - public void Does_loop_through_ReadOnly_hosts() - { - using (var manager = CreateAndStartManager()) - { - var client1 = manager.GetReadOnlyClient(); - client1.Dispose(); - var client2 = manager.GetReadOnlyClient(); - client2.Dispose(); - var client3 = manager.GetReadOnlyClient(); - var client4 = manager.GetReadOnlyClient(); - var client5 = manager.GetReadOnlyClient(); - - AssertClientHasHost(client1, testReadOnlyHosts[0]); - AssertClientHasHost(client2, testReadOnlyHosts[1]); - AssertClientHasHost(client3, testReadOnlyHosts[2]); - AssertClientHasHost(client4, testReadOnlyHosts[0]); - AssertClientHasHost(client5, testReadOnlyHosts[1]); - - mockFactory.VerifyAll(); - } - } - - [Test] - public void Can_have_different_pool_size_and_host_configurations() - { - var writeHosts = new[] { "readwrite1" }; - var readHosts = new[] { "read1", "read2" }; - - const int poolSizeMultiplier = 4; - - using (var manager = new PooledRedisClientManager(writeHosts, readHosts, - new RedisClientManagerConfig { - MaxWritePoolSize = writeHosts.Length * poolSizeMultiplier, - MaxReadPoolSize = readHosts.Length * poolSizeMultiplier, - AutoStart = true, - } - ) { - RedisClientFactory = mockFactory.Object, - } - ) - { - //A poolsize of 4 will not block getting 4 clients - using (var client1 = manager.GetClient()) - using (var client2 = manager.GetClient()) - using (var client3 = manager.GetClient()) - using (var client4 = manager.GetClient()) - { - AssertClientHasHost(client1, writeHosts[0]); - AssertClientHasHost(client2, writeHosts[0]); - AssertClientHasHost(client3, writeHosts[0]); - AssertClientHasHost(client4, writeHosts[0]); - } - - //A poolsize of 8 will not block getting 8 clients - using (var client1 = manager.GetReadOnlyClient()) - using (var client2 = manager.GetReadOnlyClient()) - using (var client3 = manager.GetReadOnlyClient()) - using (var client4 = manager.GetReadOnlyClient()) - using (var client5 = manager.GetReadOnlyClient()) - using (var client6 = manager.GetReadOnlyClient()) - using (var client7 = manager.GetReadOnlyClient()) - using (var client8 = manager.GetReadOnlyClient()) - { - AssertClientHasHost(client1, readHosts[0]); - AssertClientHasHost(client2, readHosts[1]); - AssertClientHasHost(client3, readHosts[0]); - AssertClientHasHost(client4, readHosts[1]); - AssertClientHasHost(client5, readHosts[0]); - AssertClientHasHost(client6, readHosts[1]); - AssertClientHasHost(client7, readHosts[0]); - AssertClientHasHost(client8, readHosts[1]); - } - - mockFactory.VerifyAll(); - } 
- } - - [Test] - public void Does_block_ReadWrite_clients_pool() - { - using (var manager = CreateAndStartManager()) - { - var delay = TimeSpan.FromSeconds(1); - var client1 = manager.GetClient(); - var client2 = manager.GetClient(); - var client3 = manager.GetClient(); - var client4 = manager.GetClient(); - - Action func = delegate { - Thread.Sleep(delay + TimeSpan.FromSeconds(0.5)); - client4.Dispose(); - }; - - func.BeginInvoke(null, null); - - var start = DateTime.Now; - - var client5 = manager.GetClient(); - - Assert.That(DateTime.Now - start, Is.GreaterThanOrEqualTo(delay)); - - AssertClientHasHost(client1, testReadWriteHosts[0]); - AssertClientHasHost(client2, testReadWriteHosts[1]); - AssertClientHasHost(client3, testReadWriteHosts[2]); - AssertClientHasHost(client4, testReadWriteHosts[3]); - AssertClientHasHost(client5, testReadWriteHosts[3]); - - mockFactory.VerifyAll(); - } - } - - [Test] - public void Does_block_ReadOnly_clients_pool() - { - var delay = TimeSpan.FromSeconds(1); - - using (var manager = CreateAndStartManager()) - { - var client1 = manager.GetReadOnlyClient(); - var client2 = manager.GetReadOnlyClient(); - var client3 = manager.GetReadOnlyClient(); - - Action func = delegate { - Thread.Sleep(delay + TimeSpan.FromSeconds(0.5)); - client3.Dispose(); - }; - - func.BeginInvoke(null, null); - - var start = DateTime.Now; - - var client4 = manager.GetReadOnlyClient(); - - Assert.That(DateTime.Now - start, Is.GreaterThanOrEqualTo(delay)); - - AssertClientHasHost(client1, testReadOnlyHosts[0]); - AssertClientHasHost(client2, testReadOnlyHosts[1]); - AssertClientHasHost(client3, testReadOnlyHosts[2]); - AssertClientHasHost(client4, testReadOnlyHosts[2]); - - mockFactory.VerifyAll(); - } - } - - [Test] - public void Can_support_64_threads_using_the_client_simultaneously() - { - const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 - var clientUsageMap = new Dictionary(); - - var clientAsyncResults = new List(); - using (var manager = CreateAndStartManager()) - { - for (var i = 0; i < noOfConcurrentClients; i++) - { - var clientNo = i; - var action = (Action)(() => UseClient(manager, clientNo, clientUsageMap)); - clientAsyncResults.Add(action.BeginInvoke(null, null)); - } - } - - WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); - - Debug.WriteLine(TypeSerializer.SerializeToString(clientUsageMap)); - - var hostCount = 0; - foreach (var entry in clientUsageMap) - { - Assert.That(entry.Value, Is.GreaterThanOrEqualTo(5), "Host has unproportianate distrobution: " + entry.Value); - Assert.That(entry.Value, Is.LessThanOrEqualTo(30), "Host has unproportianate distrobution: " + entry.Value); - hostCount += entry.Value; - } - - Assert.That(hostCount, Is.EqualTo(noOfConcurrentClients), "Invalid no of clients used"); - } - - private static void UseClient(IRedisClientsManager manager, int clientNo, Dictionary hostCountMap) - { - using (var client = manager.GetClient()) - { - lock (hostCountMap) - { - int hostCount; - if (!hostCountMap.TryGetValue(client.Host, out hostCount)) - { - hostCount = 0; - } - - hostCountMap[client.Host] = ++hostCount; - } - - Debug.WriteLine(String.Format("Client '{0}' is using '{1}'", clientNo, client.Host)); - } - } - - } + [TestFixture, Category("Integration")] + public class PooledRedisClientManagerTests + { + [OneTimeSetUp] + public void OneTimeSetUp() + { + RedisConfig.VerifyMasterConnections = false; + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { + RedisConfig.VerifyMasterConnections = 
true; + } + + readonly string[] testReadWriteHosts = new[] { + "readwrite1", "readwrite2:6000", "192.168.0.1", "localhost" + }; + + readonly string[] testReadOnlyHosts = new[] { + "read1", "read2:7000", "127.0.0.1" + }; + + private string firstReadWriteHost; + private string firstReadOnlyHost; + + [SetUp] + public void OnBeforeEachTest() + { + firstReadWriteHost = testReadWriteHosts[0]; + firstReadOnlyHost = testReadOnlyHosts[0]; + } + + public PooledRedisClientManager CreateManager(string[] readWriteHosts, string[] readOnlyHosts, int? defaultDb = null) + { + return new PooledRedisClientManager(readWriteHosts, readOnlyHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = readWriteHosts.Length, + MaxReadPoolSize = readOnlyHosts.Length, + AutoStart = false, + DefaultDb = defaultDb + }); + } + public PooledRedisClientManager CreateManager(params string[] readWriteHosts) + { + return CreateManager(readWriteHosts, readWriteHosts); + } + + public PooledRedisClientManager CreateManager() + { + return CreateManager(testReadWriteHosts, testReadOnlyHosts); + } + + public PooledRedisClientManager CreateAndStartManager() + { + var manager = CreateManager(); + manager.Start(); + return manager; + } + + [Test] + public void Cant_get_client_without_calling_Start() + { + using (var manager = CreateManager()) + { + try + { + var client = manager.GetClient(); + } + catch (InvalidOperationException) + { + return; + } + Assert.Fail("Should throw"); + } + } + + [Test] + public void Can_change_db_for_client_PooledRedisClientManager() + { + using (var db1 = new PooledRedisClientManager(1, new string[] { TestConfig.SingleHost })) + using (var db2 = new PooledRedisClientManager(2, new string[] { TestConfig.SingleHost })) + { + var val = Environment.TickCount; + var key = "test" + val; + var db1c = db1.GetClient(); + var db2c = db2.GetClient(); + try + { + db1c.Set(key, val); + Assert.That(db2c.Get(key), Is.EqualTo(0)); + Assert.That(db1c.Get(key), Is.EqualTo(val)); + } + finally + { + db1c.Remove(key); + } + } + } + + [Test] + public void Can_change_db_for_client_RedisManagerPool() + { + using (var db1 = new RedisManagerPool(TestConfig.SingleHost + "?db=1")) + using (var db2 = new RedisManagerPool(TestConfig.SingleHost + "?db=2")) + { + var val = Environment.TickCount; + var key = "test" + val; + var db1c = db1.GetClient(); + var db2c = db2.GetClient(); + try + { + db1c.Set(key, val); + Assert.That(db2c.Get(key), Is.EqualTo(0)); + Assert.That(db1c.Get(key), Is.EqualTo(val)); + } + finally + { + db1c.Remove(key); + } + } + } + + [Test] + public void Can_change_db_for_client_BasicRedisClientManager() + { + using (var db1 = new BasicRedisClientManager(1, new string[] { TestConfig.SingleHost })) + using (var db2 = new BasicRedisClientManager(2, new string[] { TestConfig.SingleHost })) + { + var val = Environment.TickCount; + var key = "test" + val; + var db1c = db1.GetClient(); + var db2c = db2.GetClient(); + try + { + db1c.Set(key, val); + Assert.That(db2c.Get(key), Is.EqualTo(0)); + Assert.That(db1c.Get(key), Is.EqualTo(val)); + } + finally + { + db1c.Remove(key); + } + } + } + + [Test] + public void Can_get_client_after_calling_Start() + { + using (var manager = CreateManager()) + { + manager.Start(); + var client = manager.GetClient(); + } + } + + [Test] + public void Can_get_ReadWrite_client() + { + using (var manager = CreateAndStartManager()) + { + var client = manager.GetClient(); + + AssertClientHasHost(client, firstReadWriteHost); + } + } + + private static void AssertClientHasHost(IRedisClient 
client, string hostWithOptionalPort) + { + var parts = hostWithOptionalPort.Split(':'); + var port = parts.Length > 1 ? int.Parse(parts[1]) : RedisConfig.DefaultPort; + + Assert.That(client.Host, Is.EqualTo(parts[0])); + Assert.That(client.Port, Is.EqualTo(port)); + } + + [Test] + public void Can_get_ReadOnly_client() + { + using (var manager = CreateAndStartManager()) + { + var client = manager.GetReadOnlyClient(); + + AssertClientHasHost(client, firstReadOnlyHost); + } + } + + [Test] + public void Does_loop_through_ReadWrite_hosts() + { + using (var manager = CreateAndStartManager()) + { + var client1 = manager.GetClient(); + client1.Dispose(); + var client2 = manager.GetClient(); + var client3 = manager.GetClient(); + var client4 = manager.GetClient(); + var client5 = manager.GetClient(); + + AssertClientHasHost(client1, testReadWriteHosts[0]); + AssertClientHasHost(client2, testReadWriteHosts[1]); + AssertClientHasHost(client3, testReadWriteHosts[2]); + AssertClientHasHost(client4, testReadWriteHosts[3]); + AssertClientHasHost(client5, testReadWriteHosts[0]); + } + } + + [Test] + public void Does_loop_through_ReadOnly_hosts() + { + using (var manager = CreateAndStartManager()) + { + var client1 = manager.GetReadOnlyClient(); + client1.Dispose(); + var client2 = manager.GetReadOnlyClient(); + client2.Dispose(); + var client3 = manager.GetReadOnlyClient(); + var client4 = manager.GetReadOnlyClient(); + var client5 = manager.GetReadOnlyClient(); + + AssertClientHasHost(client1, testReadOnlyHosts[0]); + AssertClientHasHost(client2, testReadOnlyHosts[1]); + AssertClientHasHost(client3, testReadOnlyHosts[2]); + AssertClientHasHost(client4, testReadOnlyHosts[0]); + AssertClientHasHost(client5, testReadOnlyHosts[1]); + } + } + + [Test] + public void Can_have_different_pool_size_and_host_configurations() + { + var writeHosts = new[] { "readwrite1" }; + var readHosts = new[] { "read1", "read2" }; + + const int poolSizeMultiplier = 4; + + using (var manager = new PooledRedisClientManager(writeHosts, readHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = writeHosts.Length * poolSizeMultiplier, + MaxReadPoolSize = readHosts.Length * poolSizeMultiplier, + AutoStart = true, + } + ) + ) + { + //A poolsize of 4 will not block getting 4 clients + using (var client1 = manager.GetClient()) + using (var client2 = manager.GetClient()) + using (var client3 = manager.GetClient()) + using (var client4 = manager.GetClient()) + { + AssertClientHasHost(client1, writeHosts[0]); + AssertClientHasHost(client2, writeHosts[0]); + AssertClientHasHost(client3, writeHosts[0]); + AssertClientHasHost(client4, writeHosts[0]); + } + + //A poolsize of 8 will not block getting 8 clients + using (var client1 = manager.GetReadOnlyClient()) + using (var client2 = manager.GetReadOnlyClient()) + using (var client3 = manager.GetReadOnlyClient()) + using (var client4 = manager.GetReadOnlyClient()) + using (var client5 = manager.GetReadOnlyClient()) + using (var client6 = manager.GetReadOnlyClient()) + using (var client7 = manager.GetReadOnlyClient()) + using (var client8 = manager.GetReadOnlyClient()) + { + AssertClientHasHost(client1, readHosts[0]); + AssertClientHasHost(client2, readHosts[1]); + AssertClientHasHost(client3, readHosts[0]); + AssertClientHasHost(client4, readHosts[1]); + AssertClientHasHost(client5, readHosts[0]); + AssertClientHasHost(client6, readHosts[1]); + AssertClientHasHost(client7, readHosts[0]); + AssertClientHasHost(client8, readHosts[1]); + } + } + } + + [Test] + public void 
Does_block_ReadWrite_clients_pool() + { + using (var manager = CreateAndStartManager()) + { + var delay = TimeSpan.FromSeconds(1); + var client1 = manager.GetClient(); + var client2 = manager.GetClient(); + var client3 = manager.GetClient(); + var client4 = manager.GetClient(); + + Action func = delegate + { + Thread.Sleep(delay + TimeSpan.FromSeconds(0.5)); + client4.Dispose(); + }; + +#if NETCORE + Task.Run(func); +#else + func.BeginInvoke(null, null); +#endif + + var start = DateTime.Now; + + var client5 = manager.GetClient(); + + Assert.That(DateTime.Now - start, Is.GreaterThanOrEqualTo(delay)); + + AssertClientHasHost(client1, testReadWriteHosts[0]); + AssertClientHasHost(client2, testReadWriteHosts[1]); + AssertClientHasHost(client3, testReadWriteHosts[2]); + AssertClientHasHost(client4, testReadWriteHosts[3]); + AssertClientHasHost(client5, testReadWriteHosts[3]); + } + } + + [Test] + public void Does_block_ReadOnly_clients_pool() + { + var delay = TimeSpan.FromSeconds(1); + + using (var manager = CreateAndStartManager()) + { + var client1 = manager.GetReadOnlyClient(); + var client2 = manager.GetReadOnlyClient(); + var client3 = manager.GetReadOnlyClient(); + + Action func = delegate + { + Thread.Sleep(delay + TimeSpan.FromSeconds(0.5)); + client3.Dispose(); + }; +#if NETCORE + Task.Run(func); +#else + func.BeginInvoke(null, null); +#endif + var start = DateTime.Now; + + var client4 = manager.GetReadOnlyClient(); + + Assert.That(DateTime.Now - start, Is.GreaterThanOrEqualTo(delay)); + + AssertClientHasHost(client1, testReadOnlyHosts[0]); + AssertClientHasHost(client2, testReadOnlyHosts[1]); + AssertClientHasHost(client3, testReadOnlyHosts[2]); + AssertClientHasHost(client4, testReadOnlyHosts[2]); + } + } + + [Test] + public void Does_throw_TimeoutException_when_PoolTimeout_exceeded() + { + using (var manager = new PooledRedisClientManager(testReadWriteHosts, testReadOnlyHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = 4, + MaxReadPoolSize = 4, + AutoStart = false, + })) + { + manager.PoolTimeout = 100; + + manager.Start(); + + var masters = 4.Times(i => manager.GetClient()); + + try + { + manager.GetClient(); + Assert.Fail("Should throw TimeoutException"); + } + catch (TimeoutException ex) + { + Assert.That(ex.Message, Does.StartWith("Redis Timeout expired.")); + } + + var replicas = 4.Times(i => manager.GetReadOnlyClient()); + + try + { + manager.GetReadOnlyClient(); + Assert.Fail("Should throw TimeoutException"); + } + catch (TimeoutException ex) + { + Assert.That(ex.Message, Does.StartWith("Redis Timeout expired.")); + } + } + } + + //[Ignore("tempromental integration test")] + //[Test] + //public void Can_support_64_threads_using_the_client_simultaneously() + //{ + // const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + // var clientUsageMap = new Dictionary(); + + // var clientAsyncResults = new List(); + // using (var manager = CreateAndStartManager()) + // { + // for (var i = 0; i < noOfConcurrentClients; i++) + // { + // var clientNo = i; + // var action = (Action)(() => UseClient(manager, clientNo, clientUsageMap)); + // clientAsyncResults.Add(action.BeginInvoke(null, null)); + // } + // } + + // WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); + + // RedisStats.ToDictionary().PrintDump(); + + // Debug.WriteLine(TypeSerializer.SerializeToString(clientUsageMap)); + + // var hostCount = 0; + // foreach (var entry in clientUsageMap) + // { + // Assert.That(entry.Value, Is.GreaterThanOrEqualTo(2), "Host 
has unproportionate distribution: " + entry.Value); + // Assert.That(entry.Value, Is.LessThanOrEqualTo(30), "Host has unproportionate distribution: " + entry.Value); + // hostCount += entry.Value; + // } + + // Assert.That(hostCount, Is.EqualTo(noOfConcurrentClients), "Invalid no of clients used"); + //} + + private static void UseClient(IRedisClientsManager manager, int clientNo, Dictionary hostCountMap) + { + using (var client = manager.GetClient()) + { + lock (hostCountMap) + { + int hostCount; + if (!hostCountMap.TryGetValue(client.Host, out hostCount)) + { + hostCount = 0; + } + + hostCountMap[client.Host] = ++hostCount; + } + + Debug.WriteLine(String.Format("Client '{0}' is using '{1}'", clientNo, client.Host)); + } + } + + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Properties/AssemblyInfo.cs b/tests/ServiceStack.Redis.Tests/Properties/AssemblyInfo.cs index da860cde..157ff9b9 100644 --- a/tests/ServiceStack.Redis.Tests/Properties/AssemblyInfo.cs +++ b/tests/ServiceStack.Redis.Tests/Properties/AssemblyInfo.cs @@ -33,4 +33,4 @@ // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] [assembly: AssemblyVersion("1.0.0.0")] -[assembly: AssemblyFileVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/QueueTests.cs b/tests/ServiceStack.Redis.Tests/QueueTests.cs index fcd8ab85..8dd66806 100644 --- a/tests/ServiceStack.Redis.Tests/QueueTests.cs +++ b/tests/ServiceStack.Redis.Tests/QueueTests.cs @@ -7,31 +7,32 @@ namespace ServiceStack.Redis.Tests { [TestFixture] +#if NETCORE + [Ignore(".NET Core does not implement BinaryFormatter required for these tests")] +#endif public class QueueTests : RedisClientTestsBase { const int numMessages = 6; private IList messages0 = new List(); private IList messages1 = new List(); - private string[] patients = new[] {"patient0", "patient1"}; + private string[] patients = new[] { "patient0", "patient1" }; [SetUp] public override void OnBeforeEachTest() { - base.OnBeforeEachTest(); - for (int i = 0; i < numMessages; ++i) - { - messages0.Add(String.Format("{0}_message{1}", patients[0], i)); - messages1.Add(String.Format("{0}_message{1}", patients[1], i)); - } - + base.OnBeforeEachTest(); + for (int i = 0; i < numMessages; ++i) + { + messages0.Add(String.Format("{0}_message{1}", patients[0], i)); + messages1.Add(String.Format("{0}_message{1}", patients[1], i)); + } } [Test] public void TestSequentialWorkQueueUpdate() { - using (var queue = new RedisSequentialWorkQueue(10, 10, "127.0.0.1", 6379,1)) + using (var queue = new RedisSequentialWorkQueue(10, 10, TestConfig.SingleHost, TestConfig.RedisPort, 1)) { - for (int i = 0; i < numMessages; ++i) { queue.Enqueue(patients[0], messages0[i]); @@ -41,14 +42,13 @@ public void TestSequentialWorkQueueUpdate() for (int i = 0; i < numMessages / 2; ++i) { queue.Update(patients[0], i, messages0[i] + "UPDATE"); - } + } queue.PrepareNextWorkItem(); var batch = queue.Dequeue(numMessages / 2); // check that half of patient[0] messages are returned for (int i = 0; i < numMessages / 2; ++i) { Assert.AreEqual(batch.DequeueItems[i], messages0[i] + "UPDATE"); - } } } @@ -56,9 +56,8 @@ public void TestSequentialWorkQueueUpdate() [Test] public void TestSequentialWorkQueue() { - using (var queue = new RedisSequentialWorkQueue(10,10,"127.0.0.1",6379,1)) + using (var queue = new RedisSequentialWorkQueue(10, 10, TestConfig.SingleHost, TestConfig.RedisPort, 1)) { - for (int i = 0; i < numMessages; ++i) { 
queue.Enqueue(patients[0], messages0[i]); @@ -66,20 +65,19 @@ public void TestSequentialWorkQueue() } queue.PrepareNextWorkItem(); - var batch = queue.Dequeue(numMessages/2); + var batch = queue.Dequeue(numMessages / 2); // check that half of patient[0] messages are returned - for (int i = 0; i < numMessages/2; ++i) + for (int i = 0; i < numMessages / 2; ++i) Assert.AreEqual(batch.DequeueItems[i], messages0[i]); - Assert.AreEqual(numMessages/2, batch.DequeueItems.Count); + Assert.AreEqual(numMessages / 2, batch.DequeueItems.Count); Thread.Sleep(5000); Assert.IsTrue(queue.HarvestZombies()); for (int i = 0; i < batch.DequeueItems.Count; ++i) batch.DoneProcessedWorkItem(); - // check that all patient[1] messages are returned queue.PrepareNextWorkItem(); - batch = queue.Dequeue(2*numMessages); + batch = queue.Dequeue(2 * numMessages); // check that batch size is respected Assert.AreEqual(batch.DequeueItems.Count, numMessages); for (int i = 0; i < numMessages; ++i) @@ -87,16 +85,15 @@ public void TestSequentialWorkQueue() Assert.AreEqual(batch.DequeueItems[i], messages1[i]); batch.DoneProcessedWorkItem(); } - // check that there are numMessages/2 messages in the queue queue.PrepareNextWorkItem(); batch = queue.Dequeue(numMessages); - Assert.AreEqual(batch.DequeueItems.Count, numMessages/2); + Assert.AreEqual(batch.DequeueItems.Count, numMessages / 2); // test pop and unlock batch.DoneProcessedWorkItem(); - int remaining = batch.DequeueItems.Count-1; + int remaining = batch.DequeueItems.Count - 1; batch.PopAndUnlock(); //process remaining items @@ -109,15 +106,13 @@ public void TestSequentialWorkQueue() Assert.IsFalse(queue.PrepareNextWorkItem()); batch = queue.Dequeue(remaining); Assert.AreEqual(batch.DequeueItems.Count, 0); - } - } [Test] public void TestChronologicalWorkQueue() { - using (var queue = new RedisChronologicalWorkQueue(10, 10, "127.0.0.1", 6379)) + using (var queue = new RedisChronologicalWorkQueue(10, 10, TestConfig.SingleHost, TestConfig.RedisPort)) { const int numMessages = 6; var messages = new List(); @@ -127,9 +122,9 @@ public void TestChronologicalWorkQueue() for (int i = 0; i < numMessages; ++i) { time.Add(i); - patients.Add(String.Format("patient{0}",i)); + patients.Add(String.Format("patient{0}", i)); messages.Add(String.Format("{0}_message{1}", patients[i], i)); - queue.Enqueue(patients[i], messages[i],i); + queue.Enqueue(patients[i], messages[i], i); } // dequeue half of the messages @@ -139,24 +134,22 @@ public void TestChronologicalWorkQueue() Assert.AreEqual(batch[i].Value, messages[i]); // dequeue the rest of the messages - batch = queue.Dequeue(0,numMessages,2 * numMessages); + batch = queue.Dequeue(0, numMessages, 2 * numMessages); // check that batch size is respected - Assert.AreEqual(batch.Count, numMessages/2); - for (int i = 0; i < numMessages/2; ++i) - Assert.AreEqual(batch[i].Value, messages[i + numMessages/2]); + Assert.AreEqual(batch.Count, numMessages / 2); + for (int i = 0; i < numMessages / 2; ++i) + Assert.AreEqual(batch[i].Value, messages[i + numMessages / 2]); // check that there are no more messages in the queue - batch = queue.Dequeue(0,numMessages, numMessages); + batch = queue.Dequeue(0, numMessages, numMessages); Assert.AreEqual(batch.Count, 0); - } } - [Test] - public void TestSimpleWorkQueue() + public void TestSimpleWorkQueue() { - using (var queue = new RedisSimpleWorkQueue(10, 10, "127.0.0.1", 6379)) + using (var queue = new RedisSimpleWorkQueue(10, 10, TestConfig.SingleHost, TestConfig.RedisPort)) { int numMessages = 6; var messages = 
new string[numMessages]; @@ -165,21 +158,18 @@ public void TestSimpleWorkQueue() messages[i] = String.Format("message#{0}", i); queue.Enqueue(messages[i]); } - var batch = queue.Dequeue(numMessages*2); + var batch = queue.Dequeue(numMessages * 2); //test that batch size is respected Assert.AreEqual(batch.Count, numMessages); // test that messages are returned, in correct order for (int i = 0; i < numMessages; ++i) - Assert.AreEqual(messages[i], batch[i]); + Assert.AreEqual(messages[i], batch[i]); //test that messages were removed from queue batch = queue.Dequeue(numMessages * 2); Assert.AreEqual(batch.Count, 0); - } } - - } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.Async.cs new file mode 100644 index 00000000..53bab942 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.Async.cs @@ -0,0 +1,266 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Data; +using ServiceStack.Model; +using ServiceStack.Redis.Generic; +using ServiceStack.Script; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisBasicPersistenceProviderTestsAsync + : RedisClientTestsBaseAsync + { + List testModels; + + public static string TestModelIdsSetKey = "ids:" + typeof(TestModel).Name; + + public class TestModel + : IHasId + { + public Guid Id { get; set; } + public string Name { get; set; } + public int Age { get; set; } + + //Thanking R# for the timesaver + public bool Equals(TestModel other) + { + if (other is null) return false; + if (ReferenceEquals(this, other)) return true; + return other.Id.Equals(Id) && Equals(other.Name, Name) && other.Age == Age; + } + + public override bool Equals(object obj) + { + if (obj is null) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != typeof(TestModel)) return false; + return Equals((TestModel)obj); + } + + [SuppressMessage("Style", "IDE0070:Use 'System.HashCode'", Justification = "not in netfx")] + public override int GetHashCode() + { + unchecked + { + int result = Id.GetHashCode(); + result = (result * 397) ^ (Name != null ? 
Name.GetHashCode() : 0); + result = (result * 397) ^ Age; + return result; + } + } + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + RedisRaw.NamespacePrefix = "RedisBasicPersistenceProviderTests"; + testModels = new List(); + 5.Times(i => testModels.Add( + new TestModel { Id = Guid.NewGuid(), Name = "Name" + i, Age = 20 + i })); + } + + [Test] + public async Task Can_Store() + { + foreach (var x in testModels) + { + await RedisAsync.StoreAsync(x); + } + + var allModels = (await RedisAsync.As().GetAllAsync()).OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EquivalentTo(testModels)); + } + + [Test] + public async Task Can_StoreAll() + { + await RedisAsync.StoreAllAsync(testModels); + + var allModels = (await RedisAsync.As().GetAllAsync()).OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EquivalentTo(testModels)); + } + + [Test] + public async Task Can_WriteAll() + { + await RedisAsync.WriteAllAsync(testModels); + + var testModelIds = testModels.ConvertAll(x => x.Id); + + var allModels = (await RedisAsync.GetByIdsAsync(testModelIds)) + .OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EquivalentTo(testModels)); + } + + [Test] + public async Task Can_GetById() + { + await RedisAsync.StoreAllAsync(testModels); + + var last = testModels.Last(); + var lastById = await RedisAsync.GetByIdAsync(last.Id); + + Assert.That(lastById, Is.EqualTo(last)); + } + + [Test] + public async Task Can_GetByIds() + { + await RedisAsync.StoreAllAsync(testModels); + + var evenTestModels = testModels.Where(x => x.Age % 2 == 0) + .OrderBy(x => x.Id).ToList(); + var evenTestModelIds = evenTestModels.Select(x => x.Id).ToList(); + + var selectedModels = (await RedisAsync.GetByIdsAsync(evenTestModelIds)) + .OrderBy(x => x.Id).ToList(); + + Assert.That(selectedModels, Is.EqualTo(evenTestModels)); + } + + [Test] + public async Task Can_Delete() + { + await RedisAsync.StoreAllAsync(testModels); + + var last = testModels.Last(); + await RedisAsync.DeleteAsync(last); + + testModels.Remove(last); + + var allModels = (await RedisAsync.As().GetAllAsync()).OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EquivalentTo(testModels)); + + //Test internal TestModelIdsSetKey state + var idsRemaining = (await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + TestModelIdsSetKey)) + .OrderBy(x => x).Map(x => new Guid(x)); + + var testModelIds = testModels.OrderBy(x => x.Id).Map(x => x.Id); + + Assert.That(idsRemaining, Is.EquivalentTo(testModelIds)); + } + + [Test] + public async Task Can_DeleteAll() + { + await RedisAsync.StoreAllAsync(testModels); + + await RedisAsync.DeleteAllAsync(); + + var allModels = await RedisAsync.As().GetAllAsync(); + + Assert.That(allModels, Is.Empty); + + //Test internal TestModelIdsSetKey state + var idsRemaining = await RedisAsync.GetAllItemsFromSetAsync(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public async Task Can_DeleteAll_with_runtime_type() + { + await RedisAsync.StoreAllAsync(testModels); + + var mi = typeof(IEntityStoreAsync).GetMethod(nameof(IEntityStoreAsync.DeleteAllAsync)); + var genericMi = mi.MakeGenericMethod(typeof(TestModel)); + await (Task)genericMi.Invoke(RedisAsync, new object[] { CancellationToken.None }); + + var allModels = await RedisAsync.As().GetAllAsync(); + Assert.That(allModels, Is.Empty); + var idsRemaining = await RedisAsync.GetAllItemsFromSetAsync(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public async Task 
Can_As_DeleteAll_with_runtime_type() + { + await RedisAsync.StoreAllAsync(testModels); + + var mi = typeof(IRedisClientAsync).GetMethod(nameof(IRedisClientAsync.As)); + var genericMi = mi.MakeGenericMethod(typeof(TestModel)); + var typedClient = genericMi.Invoke(RedisAsync, TypeConstants.EmptyObjectArray); + var deleteMi = typeof(IEntityStoreAsync).GetMethod(nameof(IEntityStoreAsync.DeleteAllAsync)); + await (Task)deleteMi.Invoke(typedClient, new object[] { CancellationToken.None }); + + var allModels = await RedisAsync.As().GetAllAsync(); + Assert.That(allModels, Is.Empty); + var idsRemaining = await RedisAsync.GetAllItemsFromSetAsync(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public async Task Can_As_DeleteAll_with_script() + { + await RedisAsync.StoreAllAsync(testModels); + + var context = new ScriptContext + { + ScriptLanguages = { ScriptLisp.Language }, + AllowScriptingOfAllTypes = true, + ScriptMethods = { + new ProtectedScripts() + }, + Args = { + ["redis"] = RedisAsync + } + }.Init(); + + var type = typeof(TestModel).FullName; +#if DEBUG + RedisRaw.DebugAllowSync = true; // not reasonable to allow async from Lisp +#endif + context.EvaluateCode($"redis.call('DeleteAll<{type}>') |> return"); + context.EvaluateCode($"redis.call('As<{type}>').call('DeleteAll') |> return"); + context.RenderLisp($"(call redis \"DeleteAll<{type}>\")"); + context.RenderLisp($"(call (call redis \"As<{type}>\") \"DeleteAll\")"); +#if DEBUG + RedisRaw.DebugAllowSync = false; +#endif + + var allModels = await RedisAsync.As().GetAllAsync(); + Assert.That(allModels, Is.Empty); + var idsRemaining = await RedisAsync.GetAllItemsFromSetAsync(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public async Task Can_DeleteByIds() + { + await RedisAsync.StoreAllAsync(testModels); + + var evenTestModels = testModels.Where(x => x.Age % 2 == 0) + .OrderBy(x => x.Id).ToList(); + var evenTestModelIds = evenTestModels.Select(x => x.Id).ToList(); + + await RedisAsync.DeleteByIdsAsync(evenTestModelIds); + + evenTestModels.ForEach(x => testModels.Remove(x)); + + var allModels = (await RedisAsync.As().GetAllAsync()).OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EqualTo(testModels)); + + + //Test internal TestModelIdsSetKey state + var idsRemaining = (await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + TestModelIdsSetKey)) + .OrderBy(x => x).Map(x => new Guid(x)); + + var testModelIds = testModels.OrderBy(x => x.Id).Map(x => x.Id); + + Assert.That(idsRemaining, Is.EquivalentTo(testModelIds)); + } + + } +} diff --git a/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.cs b/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.cs index 8e7f8fdd..86a10ff2 100644 --- a/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.cs @@ -2,188 +2,250 @@ using System.Collections.Generic; using System.Linq; using NUnit.Framework; -using ServiceStack.Common.Extensions; -using ServiceStack.DesignPatterns.Model; +using ServiceStack.Model; +using ServiceStack.Redis.Generic; +using ServiceStack.Script; +using ServiceStack.Text; namespace ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class RedisBasicPersistenceProviderTests - : RedisClientTestsBase - { - List testModels; - - public static string TestModelIdsSetKey = "ids:" + typeof (TestModel).Name; - - public class TestModel - : IHasId - { - public Guid Id { get; set; 
} - public string Name { get; set; } - public int Age { get; set; } - - //Thanking R# for the timesaver - public bool Equals(TestModel other) - { - if (ReferenceEquals(null, other)) return false; - if (ReferenceEquals(this, other)) return true; - return other.Id.Equals(Id) && Equals(other.Name, Name) && other.Age == Age; - } - - public override bool Equals(object obj) - { - if (ReferenceEquals(null, obj)) return false; - if (ReferenceEquals(this, obj)) return true; - if (obj.GetType() != typeof(TestModel)) return false; - return Equals((TestModel)obj); - } - - public override int GetHashCode() - { - unchecked - { - int result = Id.GetHashCode(); - result = (result * 397) ^ (Name != null ? Name.GetHashCode() : 0); - result = (result * 397) ^ Age; - return result; - } - } - } - - public override void OnBeforeEachTest() - { - base.OnBeforeEachTest(); + [TestFixture, Category("Integration")] + public class RedisBasicPersistenceProviderTests + : RedisClientTestsBase + { + List testModels; + + public static string TestModelIdsSetKey = "ids:" + typeof(TestModel).Name; + + public class TestModel + : IHasId + { + public Guid Id { get; set; } + public string Name { get; set; } + public int Age { get; set; } + + //Thanking R# for the timesaver + public bool Equals(TestModel other) + { + if (ReferenceEquals(null, other)) return false; + if (ReferenceEquals(this, other)) return true; + return other.Id.Equals(Id) && Equals(other.Name, Name) && other.Age == Age; + } + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != typeof(TestModel)) return false; + return Equals((TestModel)obj); + } + + public override int GetHashCode() + { + unchecked + { + int result = Id.GetHashCode(); + result = (result * 397) ^ (Name != null ? 
Name.GetHashCode() : 0); + result = (result * 397) ^ Age; + return result; + } + } + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); Redis.NamespacePrefix = "RedisBasicPersistenceProviderTests"; - testModels = new List(); - 5.Times(i => testModels.Add( - new TestModel { Id = Guid.NewGuid(), Name = "Name" + i, Age = 20 + i })); - } + testModels = new List(); + 5.Times(i => testModels.Add( + new TestModel { Id = Guid.NewGuid(), Name = "Name" + i, Age = 20 + i })); + } - [Test] - public void Can_Store() - { - testModels.ForEach(x => Redis.Store(x)); + [Test] + public void Can_Store() + { + testModels.ForEach(x => Redis.Store(x)); - var allModels = Redis.GetAll().OrderBy(x => x.Age).ToList(); + var allModels = Redis.GetAll().OrderBy(x => x.Age).ToList(); - Assert.That(allModels, Is.EquivalentTo(testModels)); - } + Assert.That(allModels, Is.EquivalentTo(testModels)); + } - [Test] - public void Can_StoreAll() - { - Redis.StoreAll(testModels); + [Test] + public void Can_StoreAll() + { + Redis.StoreAll(testModels); - var allModels = Redis.GetAll().OrderBy(x => x.Age).ToList(); + var allModels = Redis.GetAll().OrderBy(x => x.Age).ToList(); - Assert.That(allModels, Is.EquivalentTo(testModels)); - } + Assert.That(allModels, Is.EquivalentTo(testModels)); + } - [Test] - public void Can_WriteAll() - { - Redis.WriteAll(testModels); + [Test] + public void Can_WriteAll() + { + Redis.WriteAll(testModels); - var testModelIds = testModels.ConvertAll(x => x.Id); + var testModelIds = testModels.ConvertAll(x => x.Id); - var allModels = Redis.GetByIds(testModelIds) - .OrderBy(x => x.Age).ToList(); + var allModels = Redis.GetByIds(testModelIds) + .OrderBy(x => x.Age).ToList(); - Assert.That(allModels, Is.EquivalentTo(testModels)); - } + Assert.That(allModels, Is.EquivalentTo(testModels)); + } - [Test] - public void Can_GetById() - { - Redis.StoreAll(testModels); + [Test] + public void Can_GetById() + { + Redis.StoreAll(testModels); - var last = testModels.Last(); - var lastById = Redis.GetById(last.Id); + var last = testModels.Last(); + var lastById = Redis.GetById(last.Id); - Assert.That(lastById, Is.EqualTo(last)); - } + Assert.That(lastById, Is.EqualTo(last)); + } - [Test] - public void Can_GetByIds() - { - Redis.StoreAll(testModels); + [Test] + public void Can_GetByIds() + { + Redis.StoreAll(testModels); - var evenTestModels = testModels.Where(x => x.Age % 2 == 0) - .OrderBy(x => x.Id).ToList(); - var evenTestModelIds = evenTestModels.Select(x => x.Id).ToList(); + var evenTestModels = testModels.Where(x => x.Age % 2 == 0) + .OrderBy(x => x.Id).ToList(); + var evenTestModelIds = evenTestModels.Select(x => x.Id).ToList(); - var selectedModels = Redis.GetByIds(evenTestModelIds) - .OrderBy(x => x.Id).ToList(); + var selectedModels = Redis.GetByIds(evenTestModelIds) + .OrderBy(x => x.Id).ToList(); + + Assert.That(selectedModels, Is.EqualTo(evenTestModels)); + } + + [Test] + public void Can_Delete() + { + Redis.StoreAll(testModels); + + var last = testModels.Last(); + Redis.Delete(last); + + testModels.Remove(last); - Assert.That(selectedModels, Is.EqualTo(evenTestModels)); - } + var allModels = Redis.GetAll().OrderBy(x => x.Age).ToList(); - [Test] - public void Can_Delete() - { - Redis.StoreAll(testModels); + Assert.That(allModels, Is.EquivalentTo(testModels)); - var last = testModels.Last(); - Redis.Delete(last); + //Test internal TestModelIdsSetKey state + var idsRemaining = Redis.GetAllItemsFromSet(Redis.NamespacePrefix + TestModelIdsSetKey) + .OrderBy(x => x).Map(x => new Guid(x)); 
- testModels.Remove(last); + var testModelIds = testModels.OrderBy(x => x.Id).Map(x => x.Id); - var allModels = Redis.GetAll().OrderBy(x => x.Age).ToList(); + Assert.That(idsRemaining, Is.EquivalentTo(testModelIds)); + } - Assert.That(allModels, Is.EquivalentTo(testModels)); + [Test] + public void Can_DeleteAll() + { + Redis.StoreAll(testModels); - //Test internal TestModelIdsSetKey state - var idsRemaining = Redis.GetAllItemsFromSet(Redis.NamespacePrefix + TestModelIdsSetKey) - .OrderBy(x => x).ConvertAll(x => new Guid(x)); + Redis.DeleteAll(); - var testModelIds = testModels.OrderBy(x => x.Id).ConvertAll(x => x.Id); + var allModels = Redis.GetAll(); - Assert.That(idsRemaining, Is.EquivalentTo(testModelIds)); - } + Assert.That(allModels, Is.Empty); - [Test] - public void Can_DeleteAll() - { - Redis.StoreAll(testModels); + //Test internal TestModelIdsSetKey state + var idsRemaining = Redis.GetAllItemsFromSet(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } - Redis.DeleteAll(); + [Test] + public void Can_DeleteAll_with_runtime_type() + { + Redis.StoreAll(testModels); - var allModels = Redis.GetAll(); + var mi = Redis.GetType().GetMethod(nameof(RedisClient.DeleteAll)); + var genericMi = mi.MakeGenericMethod(typeof(TestModel)); + genericMi.Invoke(Redis, TypeConstants.EmptyObjectArray); - Assert.That(allModels, Is.Empty); + var allModels = Redis.GetAll(); + Assert.That(allModels, Is.Empty); + var idsRemaining = Redis.GetAllItemsFromSet(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } - //Test internal TestModelIdsSetKey state - var idsRemaining = Redis.GetAllItemsFromSet(TestModelIdsSetKey); - Assert.That(idsRemaining, Is.Empty); - } + [Test] + public void Can_As_DeleteAll_with_runtime_type() + { + Redis.StoreAll(testModels); - [Test] - public void Can_DeleteByIds() - { - Redis.StoreAll(testModels); + var mi = Redis.GetType().GetMethod(nameof(RedisClient.As)); + var genericMi = mi.MakeGenericMethod(typeof(TestModel)); + var typedClient = genericMi.Invoke(Redis, TypeConstants.EmptyObjectArray); + var deleteMi = typedClient.GetType().GetMethod(nameof(IRedisTypedClient.DeleteAll)); + deleteMi.Invoke(typedClient, TypeConstants.EmptyObjectArray); - var evenTestModels = testModels.Where(x => x.Age % 2 == 0) - .OrderBy(x => x.Id).ToList(); - var evenTestModelIds = evenTestModels.Select(x => x.Id).ToList(); + var allModels = Redis.GetAll(); + Assert.That(allModels, Is.Empty); + var idsRemaining = Redis.GetAllItemsFromSet(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } - Redis.DeleteByIds(evenTestModelIds); + [Test] + public void Can_As_DeleteAll_with_script() + { + Redis.StoreAll(testModels); + + var context = new ScriptContext { + ScriptLanguages = { ScriptLisp.Language }, + AllowScriptingOfAllTypes = true, + ScriptMethods = { + new ProtectedScripts() + }, + Args = { + ["redis"] = Redis + } + }.Init(); - evenTestModels.ForEach(x => testModels.Remove(x)); + var type = typeof(TestModel).FullName; + context.EvaluateCode($"redis.call('DeleteAll<{type}>') |> return"); + context.EvaluateCode($"redis.call('As<{type}>').call('DeleteAll') |> return"); + context.RenderLisp($"(call redis \"DeleteAll<{type}>\")"); + context.RenderLisp($"(call (call redis \"As<{type}>\") \"DeleteAll\")"); - var allModels = Redis.GetAll().OrderBy(x => x.Age).ToList(); + var allModels = Redis.GetAll(); + Assert.That(allModels, Is.Empty); + var idsRemaining = Redis.GetAllItemsFromSet(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } - Assert.That(allModels, 
Is.EqualTo(testModels)); + [Test] + public void Can_DeleteByIds() + { + Redis.StoreAll(testModels); + var evenTestModels = testModels.Where(x => x.Age % 2 == 0) + .OrderBy(x => x.Id).ToList(); + var evenTestModelIds = evenTestModels.Select(x => x.Id).ToList(); - //Test internal TestModelIdsSetKey state - var idsRemaining = Redis.GetAllItemsFromSet(Redis.NamespacePrefix + TestModelIdsSetKey) - .OrderBy(x => x).ConvertAll(x => new Guid(x)); + Redis.DeleteByIds(evenTestModelIds); - var testModelIds = testModels.OrderBy(x => x.Id).ConvertAll(x => x.Id); + evenTestModels.ForEach(x => testModels.Remove(x)); - Assert.That(idsRemaining, Is.EquivalentTo(testModelIds)); - } + var allModels = Redis.GetAll().OrderBy(x => x.Age).ToList(); - } + Assert.That(allModels, Is.EqualTo(testModels)); + + + //Test internal TestModelIdsSetKey state + var idsRemaining = Redis.GetAllItemsFromSet(Redis.NamespacePrefix + TestModelIdsSetKey) + .OrderBy(x => x).Map(x => new Guid(x)); + + var testModelIds = testModels.OrderBy(x => x.Id).Map(x => x.Id); + + Assert.That(idsRemaining, Is.EquivalentTo(testModelIds)); + } + + } } diff --git a/tests/ServiceStack.Redis.Tests/RedisBatchTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisBatchTests.Async.cs new file mode 100644 index 00000000..3a3e1e82 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisBatchTests.Async.cs @@ -0,0 +1,46 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisBatchTestsAsync + : RedisClientTestsBaseAsync + { + public class Message + { + public long Id { get; set; } + public string Key { get; set; } + public string Value { get; set; } + public string Description { get; set; } + } + + [Test] + public async Task Store_batch_items_in_List() + { + var redisMessages = RedisAsync.As(); + const int batchSize = 500; + var nextIds = await redisMessages.GetNextSequenceAsync(batchSize); + + var msgBatch = batchSize.Times(i => + new Message + { + Id = nextIds - (batchSize - i) + 1, + Key = i.ToString(), + Value = Guid.NewGuid().ToString(), + Description = "Description" + }); + + await redisMessages.Lists["listName"].AddRangeAsync(msgBatch); + + var msgs = await redisMessages.Lists["listName"].GetAllAsync(); + Assert.That(msgs.Count, Is.EqualTo(batchSize)); + + Assert.That(msgs.First().Id, Is.EqualTo(1)); + Assert.That(msgs.Last().Id, Is.EqualTo(500)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisBatchTests.cs b/tests/ServiceStack.Redis.Tests/RedisBatchTests.cs new file mode 100644 index 00000000..5004e042 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisBatchTests.cs @@ -0,0 +1,45 @@ +using System; +using System.Linq; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisBatchTests + : RedisClientTestsBase + { + public class Message + { + public long Id { get; set; } + public string Key { get; set; } + public string Value { get; set; } + public string Description { get; set; } + } + + [Test] + public void Store_batch_items_in_List() + { + var redisMessages = Redis.As(); + const int batchSize = 500; + var nextIds = redisMessages.GetNextSequence(batchSize); + + var msgBatch = batchSize.Times(i => + new Message + { + Id = nextIds - (batchSize - i) + 1, + Key = i.ToString(), + Value = Guid.NewGuid().ToString(), + Description = "Description" + }); + + 
redisMessages.Lists["listName"].AddRange(msgBatch); + + var msgs = redisMessages.Lists["listName"].GetAll(); + Assert.That(msgs.Count, Is.EqualTo(batchSize)); + + Assert.That(msgs.First().Id, Is.EqualTo(1)); + Assert.That(msgs.Last().Id, Is.EqualTo(500)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisBenchmarkTests.cs b/tests/ServiceStack.Redis.Tests/RedisBenchmarkTests.cs index 226ea563..d4c70384 100644 --- a/tests/ServiceStack.Redis.Tests/RedisBenchmarkTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisBenchmarkTests.cs @@ -1,13 +1,13 @@ using System; using System.Diagnostics; using NUnit.Framework; +using ServiceStack.Logging; using ServiceStack.Redis; using ServiceStack.Text; - namespace ServiceStack.Redis.Tests { - [TestFixture] + [TestFixture, Ignore("Benchmark")] public class RedisBenchmarkTests : RedisClientTestsBase { @@ -39,14 +39,12 @@ public void Measure_pipeline_speedup() using (var pipeline = Redis.CreatePipeline()) { for (int i = 0; i < total; ++i) - pipeline.QueueCommand(r => ((RedisNativeClient)Redis).Set(key + i.ToString(),temp)); + pipeline.QueueCommand(r => ((RedisNativeClient)Redis).Set(key + i.ToString(), temp)); pipeline.Flush(); } sw.Stop(); Debug.WriteLine(String.Format("Time for pipelining {0} Set(key,value) operations: {1} ms", total, sw.ElapsedMilliseconds)); - - } private string[] stringsFromBytes(byte[][] input) @@ -56,13 +54,14 @@ private string[] stringsFromBytes(byte[][] input) return new string[1]; } - var rc = new string[input.Length]; - for (int i = 0; i < input.Length; ++i ) + var rc = new string[input.Length]; + for (int i = 0; i < input.Length; ++i) { rc[i] = input[i].FromUtf8Bytes(); } return rc; } + [Test] public void Compare_sort_nosort_to_smembers_mget() { @@ -73,7 +72,6 @@ public void Compare_sort_nosort_to_smembers_mget() byte fixedValue = 124; temp[0] = fixedValue; - //initialize set and individual keys Redis.Del(setKey); for (var i = 0; i < total; ++i) @@ -90,7 +88,7 @@ public void Compare_sort_nosort_to_smembers_mget() { var keys = Redis.SMembers(setKey); results = Redis.MGet(keys); - + } sw.Stop(); @@ -101,13 +99,12 @@ public void Compare_sort_nosort_to_smembers_mget() Assert.AreEqual(result[0], fixedValue); } - Debug.WriteLine(String.Format("Time to call {0} SMembers and MGet operations: {1} ms", count, sw.ElapsedMilliseconds)); - var opt = new SortOptions() {SortPattern = "nosort", GetPattern = "*"}; + var opt = new SortOptions() { SortPattern = "nosort", GetPattern = "*" }; sw = Stopwatch.StartNew(); for (int i = 0; i < count; ++i) - results = Redis.Sort(setKey, opt); + results = Redis.Sort(setKey, opt); sw.Stop(); //make sure that results are valid @@ -116,11 +113,87 @@ public void Compare_sort_nosort_to_smembers_mget() Assert.AreEqual(result[0], fixedValue); } - Debug.WriteLine(String.Format("Time to call {0} sort operations: {1} ms", count, sw.ElapsedMilliseconds)); + } + } + + [TestFixture, Ignore("Benchmark")] + public class RawBytesSetBenchmark + { + public void Run(string name, int nBlockSizeBytes, Action fn) + { + Stopwatch sw; + long ms1, ms2, interval; + int nBytesHandled = 0; + int nMaxIterations = 5; + byte[] pBuffer = new byte[nBlockSizeBytes]; + + // Create Redis Wrapper + var redis = new RedisNativeClient(); + + // Clear DB + redis.FlushAll(); + + sw = Stopwatch.StartNew(); + ms1 = sw.ElapsedMilliseconds; + for (int i = 0; i < nMaxIterations; i++) + { + fn(i, pBuffer); + nBytesHandled += nBlockSizeBytes; + } + + ms2 = sw.ElapsedMilliseconds; + interval = ms2 - ms1; + + // Calculate 
rate + double dMBPerSEc = nBytesHandled / 1024.0 / 1024.0 / (interval / 1000.0); + Console.WriteLine(name + ": Rate {0:N4}, Total: {1}ms", dMBPerSEc, ms2); + } + [Test] + public void Benchmark_SET_raw_bytes_8MB_ServiceStack() + { + var redis = new RedisNativeClient(); + + Run("ServiceStack.Redis 8MB", 8000000, + (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); + } + + [Test] + public void Benchmark_SET_raw_bytes_1MB_ServiceStack() + { + var redis = new RedisNativeClient(); + + Run("ServiceStack.Redis 1MB", 1000000, + (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); + } + + [Test] + public void Benchmark_SET_raw_bytes_100k_ServiceStack() + { + var redis = new RedisNativeClient(); + + Run("ServiceStack.Redis 100K", 100000, + (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); + } + + [Test] + public void Benchmark_SET_raw_bytes_10k_ServiceStack() + { + var redis = new RedisNativeClient(); + + Run("ServiceStack.Redis 10K", 10000, + (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); + } + + [Test] + public void Benchmark_SET_raw_bytes_1k_ServiceStack() + { + var redis = new RedisNativeClient(); + Run("ServiceStack.Redis 1K", 1000, + (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); } } -} +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.Async.cs new file mode 100644 index 00000000..4ee85060 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.Async.cs @@ -0,0 +1,139 @@ +using System; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Caching; +using ServiceStack.Common.Tests.Models; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + [Category("Async")] + public class RedisCacheClientTestsAsync + { + private ICacheClientAsync cacheClient; + + [SetUp] + public async Task OnBeforeEachTest() + { + if (cacheClient is object) + await cacheClient.DisposeAsync(); + + cacheClient = new RedisClient(TestConfig.SingleHost); + await cacheClient.FlushAllAsync(); + } + + [Test] + public async Task Get_non_existant_value_returns_null() + { + var model = ModelWithIdAndName.Create(1); + var cacheKey = model.CreateUrn(); + var existingModel = await cacheClient.GetAsync(cacheKey); + Assert.That(existingModel, Is.Null); + } + + [Test] + public async Task Get_non_existant_generic_value_returns_null() + { + var model = ModelWithIdAndName.Create(1); + var cacheKey = model.CreateUrn(); + var existingModel = await cacheClient.GetAsync(cacheKey); + Assert.That(existingModel, Is.Null); + } + + [Test] + public async Task Can_store_and_get_model() + { + var model = ModelWithIdAndName.Create(1); + var cacheKey = model.CreateUrn(); + await cacheClient.SetAsync(cacheKey, model); + + var existingModel = await cacheClient.GetAsync(cacheKey); + ModelWithIdAndName.AssertIsEqual(existingModel, model); + } + + [Test] + public async Task Can_store_null_model() + { + await cacheClient.SetAsync("test-key", null); + } + + [Test] + public async Task Can_Set_and_Get_key_with_all_byte_values() + { + const string key = "bytesKey"; + + var value = new byte[256]; + for (var i = 0; i < value.Length; i++) + { + value[i] = (byte)i; + } + + await cacheClient.SetAsync(key, value); + var resultValue = await cacheClient.GetAsync(key); + + Assert.That(resultValue, Is.EquivalentTo(value)); + } + + [Test] + public async Task Can_Replace_By_Pattern() + { + var model = ModelWithIdAndName.Create(1); + string modelKey = "model:" + model.CreateUrn(); + await 
cacheClient.AddAsync(modelKey, model); + + model = ModelWithIdAndName.Create(2); + string modelKey2 = "xxmodelxx:" + model.CreateUrn(); + await cacheClient.AddAsync(modelKey2, model); + + string s = "this is a string"; + await cacheClient.AddAsync("string1", s); + + var removable = (IRemoveByPatternAsync)cacheClient; + await removable.RemoveByPatternAsync("*model*"); + + ModelWithIdAndName result = await cacheClient.GetAsync(modelKey); + Assert.That(result, Is.Null); + + result = await cacheClient.GetAsync(modelKey2); + Assert.That(result, Is.Null); + + string result2 = await cacheClient.GetAsync("string1"); + Assert.That(result2, Is.EqualTo(s)); + + await removable.RemoveByPatternAsync("string*"); + + result2 = await cacheClient.GetAsync("string1"); + Assert.That(result2, Is.Null); + } + + [Test] + public async Task Can_GetTimeToLive() + { + var model = ModelWithIdAndName.Create(1); + string key = "model:" + model.CreateUrn(); + await cacheClient.AddAsync(key, model); + + var ttl = await cacheClient.GetTimeToLiveAsync(key); + Assert.That(ttl, Is.EqualTo(TimeSpan.MaxValue)); + + await cacheClient.SetAsync(key, model, expiresIn: TimeSpan.FromSeconds(10)); + ttl = await cacheClient.GetTimeToLiveAsync(key); + Assert.That(ttl.Value, Is.GreaterThanOrEqualTo(TimeSpan.FromSeconds(9))); + Assert.That(ttl.Value, Is.LessThanOrEqualTo(TimeSpan.FromSeconds(10))); + + await cacheClient.RemoveAsync(key); + ttl = await cacheClient.GetTimeToLiveAsync(key); + Assert.That(ttl, Is.Null); + } + + [Test] + public async Task Can_increment_and_reset_values() + { + await using var client = await new RedisManagerPool(TestConfig.SingleHost).GetCacheClientAsync(); + + Assert.That(await client.IncrementAsync("incr:counter", 10), Is.EqualTo(10)); + await client.SetAsync("incr:counter", 0); + Assert.That(await client.IncrementAsync("incr:counter", 10), Is.EqualTo(10)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.cs b/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.cs index b9bc20bd..6817d7ec 100644 --- a/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.cs @@ -1,78 +1,141 @@ -using System.Collections.Generic; +#if !NETCORE //TODO: find out why fails to build in .netcoreapp1.1 + +using System; using NUnit.Framework; -using ServiceStack.CacheAccess; +using ServiceStack.Caching; using ServiceStack.Common.Tests.Models; -using ServiceStack.Common.Utils; namespace ServiceStack.Redis.Tests { - [TestFixture] - public class RedisCacheClientTests - { - private ICacheClient cacheClient; - - [SetUp] - public void OnBeforeEachTest() - { - if (cacheClient != null) - cacheClient.Dispose(); - - cacheClient = new RedisClient(TestConfig.SingleHost); - cacheClient.FlushAll(); - } - - [Test] - public void Get_non_existant_value_returns_null() - { - var model = ModelWithIdAndName.Create(1); - var cacheKey = model.CreateUrn(); - var existingModel = cacheClient.Get(cacheKey); - Assert.That(existingModel, Is.Null); - } - - - [Test] - public void Get_non_existant_generic_value_returns_null() - { - var model = ModelWithIdAndName.Create(1); - var cacheKey = model.CreateUrn(); - var existingModel = cacheClient.Get(cacheKey); - Assert.That(existingModel, Is.Null); - } - - [Test] - public void Can_store_and_get_model() - { - var model = ModelWithIdAndName.Create(1); - var cacheKey = model.CreateUrn(); - cacheClient.Set(cacheKey, model); - - var existingModel = cacheClient.Get(cacheKey); - 
ModelWithIdAndName.AssertIsEqual(existingModel, model); - } - - - [Test] - public void Can_store_null_model() - { - cacheClient.Set("test-key", null); - } - - [Test] - public void Can_Set_and_Get_key_with_all_byte_values() - { - const string key = "bytesKey"; - - var value = new byte[256]; - for (var i = 0; i < value.Length; i++) - { - value[i] = (byte)i; - } - - cacheClient.Set(key, value); - var resultValue = cacheClient.Get(key); - - Assert.That(resultValue, Is.EquivalentTo(value)); - } - } + [TestFixture] + public class RedisCacheClientTests + { + private ICacheClientExtended cacheClient; + + [SetUp] + public void OnBeforeEachTest() + { + if (cacheClient != null) + cacheClient.Dispose(); + + cacheClient = new RedisClient(TestConfig.SingleHost); + cacheClient.FlushAll(); + } + + [Test] + public void Get_non_existant_value_returns_null() + { + var model = ModelWithIdAndName.Create(1); + var cacheKey = model.CreateUrn(); + var existingModel = cacheClient.Get(cacheKey); + Assert.That(existingModel, Is.Null); + } + + [Test] + public void Get_non_existant_generic_value_returns_null() + { + var model = ModelWithIdAndName.Create(1); + var cacheKey = model.CreateUrn(); + var existingModel = cacheClient.Get(cacheKey); + Assert.That(existingModel, Is.Null); + } + + [Test] + public void Can_store_and_get_model() + { + var model = ModelWithIdAndName.Create(1); + var cacheKey = model.CreateUrn(); + cacheClient.Set(cacheKey, model); + + var existingModel = cacheClient.Get(cacheKey); + ModelWithIdAndName.AssertIsEqual(existingModel, model); + } + + [Test] + public void Can_store_null_model() + { + cacheClient.Set("test-key", null); + } + + [Test] + public void Can_Set_and_Get_key_with_all_byte_values() + { + const string key = "bytesKey"; + + var value = new byte[256]; + for (var i = 0; i < value.Length; i++) + { + value[i] = (byte)i; + } + + cacheClient.Set(key, value); + var resultValue = cacheClient.Get(key); + + Assert.That(resultValue, Is.EquivalentTo(value)); + } + + [Test] + public void Can_Replace_By_Pattern() + { + var model = ModelWithIdAndName.Create(1); + string modelKey = "model:" + model.CreateUrn(); + cacheClient.Add(modelKey, model); + + model = ModelWithIdAndName.Create(2); + string modelKey2 = "xxmodelxx:" + model.CreateUrn(); + cacheClient.Add(modelKey2, model); + + string s = "this is a string"; + cacheClient.Add("string1", s); + + cacheClient.RemoveByPattern("*model*"); + + ModelWithIdAndName result = cacheClient.Get(modelKey); + Assert.That(result, Is.Null); + + result = cacheClient.Get(modelKey2); + Assert.That(result, Is.Null); + + string result2 = cacheClient.Get("string1"); + Assert.That(result2, Is.EqualTo(s)); + + cacheClient.RemoveByPattern("string*"); + + result2 = cacheClient.Get("string1"); + Assert.That(result2, Is.Null); + } + + [Test] + public void Can_GetTimeToLive() + { + var model = ModelWithIdAndName.Create(1); + string key = "model:" + model.CreateUrn(); + cacheClient.Add(key, model); + + var ttl = cacheClient.GetTimeToLive(key); + Assert.That(ttl, Is.EqualTo(TimeSpan.MaxValue)); + + cacheClient.Set(key, model, expiresIn: TimeSpan.FromSeconds(10)); + ttl = cacheClient.GetTimeToLive(key); + Assert.That(ttl.Value, Is.GreaterThanOrEqualTo(TimeSpan.FromSeconds(9))); + Assert.That(ttl.Value, Is.LessThanOrEqualTo(TimeSpan.FromSeconds(10))); + + cacheClient.Remove(key); + ttl = cacheClient.GetTimeToLive(key); + Assert.That(ttl, Is.Null); + } + + [Test] + public void Can_increment_and_reset_values() + { + using (var client = new 
RedisManagerPool(TestConfig.SingleHost).GetCacheClient()) + { + Assert.That(client.Increment("incr:counter", 10), Is.EqualTo(10)); + client.Set("incr:counter", 0); + Assert.That(client.Increment("incr:counter", 10), Is.EqualTo(10)); + } + } + } } + +#endif \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.Async.cs new file mode 100644 index 00000000..8dfe12ec --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.Async.cs @@ -0,0 +1,108 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisClientConfigTestsAsync + : RedisClientTestsBaseAsync + { + [Ignore("Hurts MSOpenTech Redis Server")] + [Test] + public async Task Can_Set_and_Get_Config() + { + var orig = await RedisAsync.GetConfigAsync("maxmemory"); + var newMaxMemory = (long.Parse(orig) + 1).ToString(); + await RedisAsync.SetConfigAsync("maxmemory", newMaxMemory); + var current = await RedisAsync.GetConfigAsync("maxmemory"); + Assert.That(current, Is.EqualTo(newMaxMemory)); + } + + [Test] + public async Task Can_Rewrite_Redis_Config() + { + try + { + await RedisAsync.SaveConfigAsync(); + } + catch (RedisResponseException ex) + { + if (ex.Message.StartsWith("Rewriting config file: Permission denied") + || ex.Message.StartsWith("The server is running without a config file")) + return; + throw; + } + } + + [Test] + public async Task Can_Rewrite_Info_Stats() + { + await RedisAsync.ResetInfoStatsAsync(); + } + + [Test] + public async Task Can_set_and_Get_Client_Name() + { + var clientName = "CLIENT-" + Environment.TickCount; + await RedisAsync.SetClientAsync(clientName); + var client = await RedisAsync.GetClientAsync(); + + Assert.That(client, Is.EqualTo(clientName)); + } + + [Test] + public async Task Can_GetClientsInfo() + { + var clientList = await RedisAsync.GetClientsInfoAsync(); + clientList.PrintDump(); + } + + [Test] + public async Task Can_Kill_Client() + { + var clientList = await RedisAsync.GetClientsInfoAsync(); + var firstAddr = clientList.First()["addr"]; + await RedisAsync.KillClientAsync(firstAddr); + } + + [Test] + public async Task Can_Kill_Clients() + { + await RedisAsync.KillClientsAsync(fromAddress: "192.168.0.1:6379"); + await RedisAsync.KillClientsAsync(withId: "1"); + await RedisAsync.KillClientsAsync(ofType: RedisClientType.Normal); + await RedisAsync.KillClientsAsync(ofType: RedisClientType.PubSub); + await RedisAsync.KillClientsAsync(ofType: RedisClientType.Slave); + await RedisAsync.KillClientsAsync(skipMe: true); + await RedisAsync.KillClientsAsync(fromAddress: "192.168.0.1:6379", withId: "1", ofType: RedisClientType.Normal); + await RedisAsync.KillClientsAsync(skipMe: false); + } + + [Test] + public async Task Can_get_Role_Info() + { + var result = await NativeAsync.RoleAsync(); + result.PrintDump(); + Assert.That(result.Children[0].Text, Is.EqualTo("master")); + Assert.That(await RedisAsync.GetServerRoleAsync(), Is.EqualTo(RedisServerRole.Master)); + + //needs redis-server v3.0 + //var replica = new RedisClient("10.0.0.9:6380"); + //result = replica.Role(); + //result.PrintDump(); + } + + [Test] + public Task Can_PauseAllClients() + { + //needs redis-server v3.0 + //var replica = new RedisClient("10.0.0.9:6380"); + //replica.PauseAllClients(TimeSpan.FromSeconds(2)); + + return Task.CompletedTask; + } + } +} \ No newline at end of file diff 
--git a/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.cs new file mode 100644 index 00000000..44b6b4f1 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.cs @@ -0,0 +1,105 @@ +using System; +using System.Linq; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisClientConfigTests + : RedisClientTestsBase + { + [Ignore("Hurts MSOpenTech Redis Server")] + [Test] + public void Can_Set_and_Get_Config() + { + var orig = Redis.GetConfig("maxmemory"); + var newMaxMemory = (long.Parse(orig) + 1).ToString(); + Redis.SetConfig("maxmemory", newMaxMemory); + var current = Redis.GetConfig("maxmemory"); + Assert.That(current, Is.EqualTo(newMaxMemory)); + } + + [Test] + public void Can_Rewrite_Redis_Config() + { + try + { + Redis.SaveConfig(); + } + catch (RedisResponseException ex) + { + if (ex.Message.StartsWith("Rewriting config file: Permission denied") + || ex.Message.StartsWith("The server is running without a config file")) + return; + throw; + } + } + + [Test] + public void Can_Rewrite_Info_Stats() + { + Redis.ResetInfoStats(); + } + + [Test] + public void Can_set_and_Get_Client_Name() + { + var clientName = "CLIENT-" + Environment.TickCount; + Redis.SetClient(clientName); + var client = Redis.GetClient(); + + Assert.That(client, Is.EqualTo(clientName)); + } + + [Test] + public void Can_GetClientsInfo() + { + var clientList = Redis.GetClientsInfo(); + clientList.PrintDump(); + } + + [Test] + public void Can_Kill_Client() + { + var clientList = Redis.GetClientsInfo(); + var firstAddr = clientList.First()["addr"]; + Redis.KillClient(firstAddr); + } + + [Test] + public void Can_Kill_Clients() + { + Redis.KillClients(fromAddress: "192.168.0.1:6379"); + Redis.KillClients(withId: "1"); + Redis.KillClients(ofType: RedisClientType.Normal); + Redis.KillClients(ofType: RedisClientType.PubSub); + Redis.KillClients(ofType: RedisClientType.Slave); + Redis.KillClients(skipMe: true); + Redis.KillClients(fromAddress: "192.168.0.1:6379", withId: "1", ofType: RedisClientType.Normal); + Redis.KillClients(skipMe: false); + } + + [Test] + public void Can_get_Role_Info() + { + var result = Redis.Role(); + result.PrintDump(); + Assert.That(result.Children[0].Text, Is.EqualTo("master")); + Assert.That(Redis.GetServerRole(), Is.EqualTo(RedisServerRole.Master)); + + //needs redis-server v3.0 + //var replica = new RedisClient("10.0.0.9:6380"); + //result = replica.Role(); + //result.PrintDump(); + } + + [Test] + public void Can_PauseAllClients() + { + //needs redis-server v3.0 + //var replica = new RedisClient("10.0.0.9:6380"); + //replica.PauseAllClients(TimeSpan.FromSeconds(2)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.Async.cs new file mode 100644 index 00000000..8a92f078 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.Async.cs @@ -0,0 +1,200 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Common; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisClientEvalTestsAsync : RedisClientTestsBaseAsync + { + public override void OnBeforeEachTest() + { + //base.OnBeforeEachTest(); + + //Run on local build server + RedisRaw = new 
RedisClient(TestConfig.SingleHost); + RedisRaw.FlushAll(); + } + + [Test] + public async Task Can_Eval_int() + { + var intVal = await RedisAsync.ExecLuaAsIntAsync("return 3141591", Array.Empty()); + Assert.That(intVal, Is.EqualTo(3141591)); + } + + [Test] + public async Task Can_EvalSha_int() + { + var luaBody = "return 3141591"; + await RedisAsync.ExecLuaAsIntAsync(luaBody, Array.Empty()); + var sha1 = await RedisAsync.CalculateSha1Async(luaBody); + var intVal = await RedisAsync.ExecLuaShaAsIntAsync(sha1, Array.Empty()); + Assert.That(intVal, Is.EqualTo(3141591)); + } + + [Test] + public async Task Can_Eval_int_with_args() + { + var intVal = await RedisAsync.ExecLuaAsIntAsync("return 3141591", new[] { "20", "30", "40" }); + Assert.That(intVal, Is.EqualTo(3141591)); + } + + [Test] + public async Task Can_Eval_int_with_keys_and_args() + { + var intVal = await RedisAsync.ExecLuaAsIntAsync("return KEYS[1] + ARGV[1]", new[] { "20" }, new[] { "30", "40" }); + Assert.That(intVal, Is.EqualTo(50)); + } + + [Test] + public async Task Can_Eval_int2() + { + var intVal = await RedisAsync.ExecLuaAsIntAsync("return ARGV[1] + ARGV[2]", new[] { "10", "20" }); + Assert.That(intVal, Is.EqualTo(30)); + } + + [Test] + public async Task Can_Eval_string() + { + var strVal = await RedisAsync.ExecLuaAsStringAsync(@"return 'abc'", new string[0]); + Assert.That(strVal, Is.EqualTo("abc")); + } + + [Test] + public async Task Can_Eval_HelloWorld_string() + { + var strVal = await RedisAsync.ExecLuaAsStringAsync(@"return 'Hello, ' .. ARGV[1] .. '!'", new[] { "Redis Lua" }); + Assert.That(strVal, Is.EqualTo("Hello, Redis Lua!")); + } + + [Test] + public async Task Can_Eval_string_with_args() + { + var strVal = await RedisAsync.ExecLuaAsStringAsync(@"return 'abc'", new[] { "at", "dot", "com" }); + Assert.That(strVal, Is.EqualTo("abc")); + } + + [Test] + public async Task Can_Eval_string_with_keys_an_args() + { + var strVal = await RedisAsync.ExecLuaAsStringAsync(@"return KEYS[1] .. 
ARGV[1]", new[] { "at" }, new[] { "dot", "com" }); + Assert.That(strVal, Is.EqualTo("atdot")); + } + + [Test] + public async Task Can_Eval_multidata_with_args() + { + var strVals = await RedisAsync.ExecLuaAsListAsync(@"return {ARGV[1],ARGV[2],ARGV[3]}", new[] { "at", "dot", "com" }); + Assert.That(strVals, Is.EquivalentTo(new List { "at", "dot", "com" })); + } + + [Test] + public async Task Can_Eval_multidata_with_keys_and_args() + { + var strVals = await RedisAsync.ExecLuaAsListAsync(@"return {KEYS[1],ARGV[1],ARGV[2]}", new[] { "at" }, new[] { "dot", "com" }); + Assert.That(strVals, Is.EquivalentTo(new List { "at", "dot", "com" })); + } + + [Test] + public async Task Can_Load_and_Exec_script() + { + var luaBody = "return 'load script and exec'"; + var sha1 = await RedisAsync.LoadLuaScriptAsync(luaBody); + var result = await RedisAsync.ExecLuaShaAsStringAsync(sha1, new string[0]); + Assert.That(result, Is.EqualTo("load script and exec")); + } + + [Test] + public async Task Does_flush_all_scripts() + { + var luaBody = "return 'load script and exec'"; + var sha1 = await RedisAsync.LoadLuaScriptAsync(luaBody); + var result = await RedisAsync.ExecLuaShaAsStringAsync(sha1, new string[0]); + Assert.That(result, Is.EqualTo("load script and exec")); + + await RedisAsync.RemoveAllLuaScriptsAsync(); + + try + { + result = await RedisAsync.ExecLuaShaAsStringAsync(sha1, new string[0]); + Assert.Fail("script shouldn't exist"); + } + catch (RedisResponseException ex) + { + Assert.That(ex.Message, Does.Contain("NOSCRIPT")); + } + } + + [Test] + public async Task Can_detect_which_scripts_exist() + { + var sha1 = await RedisAsync.LoadLuaScriptAsync("return 'script1'"); + var sha2 = await RedisAsync.CalculateSha1Async("return 'script2'"); + var sha3 = await RedisAsync.LoadLuaScriptAsync("return 'script3'"); + + Assert.That(await RedisAsync.HasLuaScriptAsync(sha1)); + + var existsMap = await RedisAsync.WhichLuaScriptsExistsAsync(new[] { sha1, sha2, sha3 }); + Assert.That(existsMap[sha1]); + Assert.That(!existsMap[sha2]); + Assert.That(existsMap[sha3]); + } + + [Test] + public async Task Can_create_ZPop_with_lua() + { + var luaBody = @" + local val = redis.call('zrange', KEYS[1], 0, ARGV[1]-1) + if val then redis.call('zremrangebyrank', KEYS[1], 0, ARGV[1]-1) end + return val"; + + var i = 0; + var alphabet = 26.Times(c => ((char)('A' + c)).ToString()); + foreach (var x in alphabet) + { + await RedisAsync.AddItemToSortedSetAsync("zalphabet", x, i++); + } + + var letters = await RedisAsync.ExecLuaAsListAsync(luaBody, keys: new[] { "zalphabet" }, args: new[] { "3" }); + + letters.PrintDump(); + Assert.That(letters, Is.EquivalentTo(new[] { "A", "B", "C" })); + } + + [Test] + public async Task Can_create_ZRevPop_with_lua() + { + var luaBody = @" + local val = redis.call('zrange', KEYS[1], -ARGV[1], -1) + if val then redis.call('zremrangebyrank', KEYS[1], -ARGV[1], -1) end + return val"; + + var i = 0; + var alphabet = 26.Times(c => ((char)('A' + c)).ToString()); + foreach(var x in alphabet) + { + await RedisAsync.AddItemToSortedSetAsync("zalphabet", x, i++); + } + + var letters = await RedisAsync.ExecLuaAsListAsync(luaBody, keys: new[] { "zalphabet" }, args: new[] { "3" }); + + letters.PrintDump(); + Assert.That(letters, Is.EquivalentTo(new[] { "X", "Y", "Z" })); + } + + [Test] + public async Task Can_return_DaysOfWeek_as_list() + { + foreach(var x in Enum.GetNames(typeof(DayOfWeek)).ToList()) + { + await RedisAsync.AddItemToListAsync("DaysOfWeek", x); + } + (await RedisAsync.ExecLuaAsListAsync("return 
redis.call('LRANGE', 'DaysOfWeek', 0, -1)", new string[0])).PrintDump(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs index 04890fbb..b31ff348 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs @@ -3,7 +3,6 @@ using System.Linq; using NUnit.Framework; using ServiceStack.Common; -using ServiceStack.Net30; using ServiceStack.Text; namespace ServiceStack.Redis.Tests @@ -16,7 +15,7 @@ public override void OnBeforeEachTest() //base.OnBeforeEachTest(); //Run on local build server - Redis = new RedisClient("192.168.2.16"); + Redis = new RedisClient(TestConfig.SingleHost); Redis.FlushAll(); } @@ -126,7 +125,7 @@ public void Does_flush_all_scripts() } catch (RedisResponseException ex) { - Assert.That(ex.Message, Is.StringContaining("NOSCRIPT")); + Assert.That(ex.Message, Does.Contain("NOSCRIPT")); } } @@ -184,7 +183,7 @@ public void Can_create_ZRevPop_with_lua() [Test] public void Can_return_DaysOfWeek_as_list() { - Enum.GetNames(typeof (DayOfWeek)).ToList() + Enum.GetNames(typeof(DayOfWeek)).ToList() .ForEach(x => Redis.AddItemToList("DaysOfWeek", x)); Redis.ExecLuaAsList("return redis.call('LRANGE', 'DaysOfWeek', 0, -1)").PrintDump(); } diff --git a/tests/ServiceStack.Redis.Tests/RedisClientHashTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientHashTests.Async.cs new file mode 100644 index 00000000..f95f9376 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientHashTests.Async.cs @@ -0,0 +1,351 @@ +using NUnit.Framework; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisClientHashTestsAsync + : RedisClientTestsBaseAsync + { + private const string HashId = "rchtesthash"; + + Dictionary stringMap; + Dictionary stringIntMap; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + stringMap = new Dictionary { + {"one","a"}, {"two","b"}, {"three","c"}, {"four","d"} + }; + stringIntMap = new Dictionary { + {"one",1}, {"two",2}, {"three",3}, {"four",4} + }; + } + + public override void OnAfterEachTest() + { + CleanMask = HashId + "*"; + base.OnAfterEachTest(); + } + + [Test] + public async Task Can_SetItemInHash_and_GetAllFromHash() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_RemoveFromHash() + { + const string removeMember = "two"; + + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + await RedisAsync.RemoveEntryFromHashAsync(HashId, removeMember); + + stringMap.Remove(removeMember); + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_GetItemFromHash() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var hashValue = await RedisAsync.GetValueFromHashAsync(HashId, "two"); + + Assert.That(hashValue, Is.EqualTo(stringMap["two"])); + } + + [Test] + public async Task Can_GetHashCount() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var 
hashCount = await RedisAsync.GetHashCountAsync(HashId); + + Assert.That(hashCount, Is.EqualTo(stringMap.Count)); + } + + [Test] + public async Task Does_HashContainsKey() + { + const string existingMember = "two"; + const string nonExistingMember = "five"; + + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + Assert.That(await RedisAsync.HashContainsEntryAsync(HashId, existingMember), Is.True); + Assert.That(await RedisAsync.HashContainsEntryAsync(HashId, nonExistingMember), Is.False); + } + + [Test] + public async Task Can_GetHashKeys() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + var expectedKeys = stringMap.Map(x => x.Key); + + var hashKeys = await RedisAsync.GetHashKeysAsync(HashId); + + Assert.That(hashKeys, Is.EquivalentTo(expectedKeys)); + } + + [Test] + public async Task Can_GetHashValues() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + var expectedValues = stringMap.Map(x => x.Value); + + var hashValues = await RedisAsync.GetHashValuesAsync(HashId); + + Assert.That(hashValues, Is.EquivalentTo(expectedValues)); + } + + [Test] + public async Task Can_enumerate_small_IDictionary_Hash() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var members = new List(); + await foreach (var item in RedisAsync.Hashes[HashId]) + { + Assert.That(stringMap.ContainsKey(item.Key), Is.True); + members.Add(item.Key); + } + Assert.That(members.Count, Is.EqualTo(stringMap.Count)); + } + + [Test] + public async Task Can_Add_to_IDictionary_Hash() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringMap) + { + await hash.AddAsync(x); + } + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_Clear_IDictionary_Hash() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringMap) + { + await hash.AddAsync(x); + } + + Assert.That(await hash.CountAsync(), Is.EqualTo(stringMap.Count)); + + await hash.ClearAsync(); + + Assert.That(await hash.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_IDictionary_Hash() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringMap) + { + await hash.AddAsync(x); + } + + Assert.That(await hash.ContainsKeyAsync("two"), Is.True); + Assert.That(await hash.ContainsKeyAsync("five"), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_IDictionary_Hash() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringMap) + { + await hash.AddAsync(x); + } + + stringMap.Remove("two"); + await hash.RemoveAsync("two"); + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + private static Dictionary ToStringMap(Dictionary stringIntMap) + { + var map = new Dictionary(); + foreach (var kvp in stringIntMap) + { + map[kvp.Key] = kvp.Value.ToString(); + } + return map; + } + + [Test] + public async Task Can_increment_Hash_field() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringIntMap) + { + await hash.AddAsync(x.Key, x.Value.ToString()); + } + + stringIntMap["two"] += 10; + await RedisAsync.IncrementValueInHashAsync(HashId, "two", 10); + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, 
Is.EquivalentTo(ToStringMap(stringIntMap))); + } + + [Test] + public async Task Can_increment_Hash_field_beyond_32_bits() + { + await RedisAsync.SetEntryInHashAsync(HashId, "int", Int32.MaxValue.ToString()); + await RedisAsync.IncrementValueInHashAsync(HashId, "int", 1); + long actual = Int64.Parse(await RedisAsync.GetValueFromHashAsync(HashId, "int")); + long expected = Int32.MaxValue + 1L; + Assert.That(actual, Is.EqualTo(expected)); + } + + [Test] + public async Task Can_SetItemInHashIfNotExists() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + await RedisAsync.SetEntryInHashIfNotExistsAsync(HashId, "two", "did not change existing item"); + await RedisAsync.SetEntryInHashIfNotExistsAsync(HashId, "five", "changed non existing item"); + stringMap["five"] = "changed non existing item"; + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_SetRangeInHash() + { + var newStringMap = new Dictionary { + {"five","e"}, {"six","f"}, {"seven","g"} + }; + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + await RedisAsync.SetRangeInHashAsync(HashId, newStringMap); + + newStringMap.Each(x => stringMap.Add(x.Key, x.Value)); + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_GetItemsFromHash() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var expectedValues = new List { stringMap["one"], stringMap["two"], null }; + var hashValues = await RedisAsync.GetValuesFromHashAsync(HashId, new[] { "one", "two", "not-exists" }); + + Assert.That(hashValues.EquivalentTo(expectedValues), Is.True); + } + [Test] + public async Task Can_hash_set() + { + var key = HashId + "key"; + var field = GetBytes("foo"); + var value = GetBytes("value"); + Assert.AreEqual(await NativeAsync.HDelAsync(key, field), 0); + Assert.AreEqual(await NativeAsync.HSetAsync(key, field, value), 1); + Assert.AreEqual(await NativeAsync.HDelAsync(key, field), 1); + } + + [Test] + public async Task Can_hash_multi_set_and_get() + { + const string Key = HashId + "multitest"; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + var fields = new Dictionary { { "field1", "1" }, { "field2", "2" }, { "field3", "3" } }; + + await RedisAsync.SetRangeInHashAsync(Key, fields); + var members = await RedisAsync.GetAllEntriesFromHashAsync(Key); + foreach (var member in members) + { + Assert.IsTrue(fields.ContainsKey(member.Key)); + Assert.AreEqual(fields[member.Key], member.Value); + } + } + + public class HashTest + { + public int Id { get; set; } + public string Name { get; set; } + } + + [Test] + public async Task Can_store_as_Hash() + { + var dto = new HashTest { Id = 1 }; + await RedisAsync.StoreAsHashAsync(dto); + + var storedHash = await RedisAsync.GetHashKeysAsync(dto.ToUrn()); + Assert.That(storedHash, Is.EquivalentTo(new[] { "Id" })); + + var hold = RedisClient.ConvertToHashFn; + RedisClient.ConvertToHashFn = o => + { + var map = new Dictionary(); + o.ToObjectDictionary().Each(x => map[x.Key] = (x.Value ?? 
"").ToJsv()); + return map; + }; + + await RedisAsync.StoreAsHashAsync(dto); + storedHash = await RedisAsync.GetHashKeysAsync(dto.ToUrn()); + Assert.That(storedHash, Is.EquivalentTo(new[] { "Id", "Name" })); + + RedisClient.ConvertToHashFn = hold; + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientHashTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientHashTests.cs index 2f785b5d..ccbab8a9 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientHashTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientHashTests.cs @@ -1,245 +1,258 @@ using System; using System.Collections.Generic; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Tests.Generic; +using ServiceStack.Text; namespace ServiceStack.Redis.Tests { - [TestFixture] - public class RedisClientHashTests - : RedisClientTestsBase - { - private const string HashId = "rchtesthash"; - - Dictionary stringMap; - Dictionary stringIntMap; - - public override void OnBeforeEachTest() - { - base.OnBeforeEachTest(); - stringMap = new Dictionary { - {"one","a"}, {"two","b"}, {"three","c"}, {"four","d"} - }; - stringIntMap = new Dictionary { - {"one",1}, {"two",2}, {"three",3}, {"four",4} - }; - } - - public override void TearDown() + [TestFixture] + public class RedisClientHashTests + : RedisClientTestsBase + { + private const string HashId = "rchtesthash"; + + Dictionary stringMap; + Dictionary stringIntMap; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + stringMap = new Dictionary { + {"one","a"}, {"two","b"}, {"three","c"}, {"four","d"} + }; + stringIntMap = new Dictionary { + {"one",1}, {"two",2}, {"three",3}, {"four",4} + }; + } + + public override void OnAfterEachTest() { CleanMask = HashId + "*"; - base.TearDown(); + base.OnAfterEachTest(); + } + + [Test] + public void Can_SetItemInHash_and_GetAllFromHash() + { + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + + var members = Redis.GetAllEntriesFromHash(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public void Can_RemoveFromHash() + { + const string removeMember = "two"; + + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + + Redis.RemoveEntryFromHash(HashId, removeMember); + + stringMap.Remove(removeMember); + + var members = Redis.GetAllEntriesFromHash(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public void Can_GetItemFromHash() + { + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + + var hashValue = Redis.GetValueFromHash(HashId, "two"); + + Assert.That(hashValue, Is.EqualTo(stringMap["two"])); + } + + [Test] + public void Can_GetHashCount() + { + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + + var hashCount = Redis.GetHashCount(HashId); + + Assert.That(hashCount, Is.EqualTo(stringMap.Count)); } - [Test] - public void Can_SetItemInHash_and_GetAllFromHash() - { - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + [Test] + public void Does_HashContainsKey() + { + const string existingMember = "two"; + const string nonExistingMember = "five"; - var members = Redis.GetAllEntriesFromHash(HashId); - Assert.That(members, Is.EquivalentTo(stringMap)); - } + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); - [Test] - public void Can_RemoveFromHash() - { - const string removeMember = "two"; + Assert.That(Redis.HashContainsEntry(HashId, 
existingMember), Is.True); + Assert.That(Redis.HashContainsEntry(HashId, nonExistingMember), Is.False); + } - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + [Test] + public void Can_GetHashKeys() + { + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + var expectedKeys = stringMap.Map(x => x.Key); - Redis.RemoveEntryFromHash(HashId, removeMember); + var hashKeys = Redis.GetHashKeys(HashId); - stringMap.Remove(removeMember); + Assert.That(hashKeys, Is.EquivalentTo(expectedKeys)); + } - var members = Redis.GetAllEntriesFromHash(HashId); - Assert.That(members, Is.EquivalentTo(stringMap)); - } + [Test] + public void Can_GetHashValues() + { + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + var expectedValues = stringMap.Map(x => x.Value); - [Test] - public void Can_GetItemFromHash() - { - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + var hashValues = Redis.GetHashValues(HashId); - var hashValue = Redis.GetValueFromHash(HashId, "two"); + Assert.That(hashValues, Is.EquivalentTo(expectedValues)); + } - Assert.That(hashValue, Is.EqualTo(stringMap["two"])); - } + [Test] + public void Can_enumerate_small_IDictionary_Hash() + { + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); - [Test] - public void Can_GetHashCount() - { - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + var members = new List(); + foreach (var item in Redis.Hashes[HashId]) + { + Assert.That(stringMap.ContainsKey(item.Key), Is.True); + members.Add(item.Key); + } + Assert.That(members.Count, Is.EqualTo(stringMap.Count)); + } - var hashCount = Redis.GetHashCount(HashId); + [Test] + public void Can_Add_to_IDictionary_Hash() + { + var hash = Redis.Hashes[HashId]; + stringMap.Each(hash.Add); - Assert.That(hashCount, Is.EqualTo(stringMap.Count)); - } + var members = Redis.GetAllEntriesFromHash(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } - [Test] - public void Does_HashContainsKey() - { - const string existingMember = "two"; - const string nonExistingMember = "five"; + [Test] + public void Can_Clear_IDictionary_Hash() + { + var hash = Redis.Hashes[HashId]; + stringMap.Each(hash.Add); + + Assert.That(hash.Count, Is.EqualTo(stringMap.Count)); - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); - - Assert.That(Redis.HashContainsEntry(HashId, existingMember), Is.True); - Assert.That(Redis.HashContainsEntry(HashId, nonExistingMember), Is.False); - } - - [Test] - public void Can_GetHashKeys() - { - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); - var expectedKeys = stringMap.ConvertAll(x => x.Key); + hash.Clear(); - var hashKeys = Redis.GetHashKeys(HashId); - - Assert.That(hashKeys, Is.EquivalentTo(expectedKeys)); - } - - [Test] - public void Can_GetHashValues() - { - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); - var expectedValues = stringMap.ConvertAll(x => x.Value); - - var hashValues = Redis.GetHashValues(HashId); - - Assert.That(hashValues, Is.EquivalentTo(expectedValues)); - } - - [Test] - public void Can_enumerate_small_IDictionary_Hash() - { - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); - - var members = new List(); - foreach (var item in Redis.Hashes[HashId]) - { - Assert.That(stringMap.ContainsKey(item.Key), Is.True); - members.Add(item.Key); - } - Assert.That(members.Count, Is.EqualTo(stringMap.Count)); - } - - [Test] - public void Can_Add_to_IDictionary_Hash() - { - var hash = 
Redis.Hashes[HashId]; - stringMap.ForEach(x => hash.Add(x)); + Assert.That(hash.Count, Is.EqualTo(0)); + } - var members = Redis.GetAllEntriesFromHash(HashId); - Assert.That(members, Is.EquivalentTo(stringMap)); - } - - [Test] - public void Can_Clear_IDictionary_Hash() - { - var hash = Redis.Hashes[HashId]; - stringMap.ForEach(x => hash.Add(x)); - - Assert.That(hash.Count, Is.EqualTo(stringMap.Count)); - - hash.Clear(); - - Assert.That(hash.Count, Is.EqualTo(0)); - } - - [Test] - public void Can_Test_Contains_in_IDictionary_Hash() - { - var hash = Redis.Hashes[HashId]; - stringMap.ForEach(x => hash.Add(x)); - - Assert.That(hash.ContainsKey("two"), Is.True); - Assert.That(hash.ContainsKey("five"), Is.False); - } - - [Test] - public void Can_Remove_value_from_IDictionary_Hash() - { - var hash = Redis.Hashes[HashId]; - stringMap.ForEach(x => hash.Add(x)); - - stringMap.Remove("two"); - hash.Remove("two"); - - var members = Redis.GetAllEntriesFromHash(HashId); - Assert.That(members, Is.EquivalentTo(stringMap)); - } - - private static Dictionary ToStringMap(Dictionary stringIntMap) - { - var map = new Dictionary(); - foreach (var kvp in stringIntMap) - { - map[kvp.Key] = kvp.Value.ToString(); - } - return map; - } - - [Test] - public void Can_increment_Hash_field() - { - var hash = Redis.Hashes[HashId]; - stringIntMap.ForEach(x => hash.Add(x.Key, x.Value.ToString())); - - stringIntMap["two"] += 10; - Redis.IncrementValueInHash(HashId, "two", 10); - - var members = Redis.GetAllEntriesFromHash(HashId); - Assert.That(members, Is.EquivalentTo(ToStringMap(stringIntMap))); - } - - [Test] - public void Can_SetItemInHashIfNotExists() - { - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); - - Redis.SetEntryInHashIfNotExists(HashId, "two", "did not change existing item"); - Redis.SetEntryInHashIfNotExists(HashId, "five", "changed non existing item"); - stringMap["five"] = "changed non existing item"; - - var members = Redis.GetAllEntriesFromHash(HashId); - Assert.That(members, Is.EquivalentTo(stringMap)); - } - - [Test] - public void Can_SetRangeInHash() - { - var newStringMap = new Dictionary { - {"five","e"}, {"six","f"}, {"seven","g"} - }; - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); - - Redis.SetRangeInHash(HashId, newStringMap); - - newStringMap.ForEach(x => stringMap.Add(x.Key, x.Value)); - - var members = Redis.GetAllEntriesFromHash(HashId); - Assert.That(members, Is.EquivalentTo(stringMap)); - } - - [Test] - public void Can_GetItemsFromHash() - { - stringMap.ForEach(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); - - var expectedValues = new List { stringMap["one"], stringMap["two"], null }; - var hashValues = Redis.GetValuesFromHash(HashId, "one", "two", "not-exists"); - - Assert.That(hashValues.EquivalentTo(expectedValues), Is.True); - } + [Test] + public void Can_Test_Contains_in_IDictionary_Hash() + { + var hash = Redis.Hashes[HashId]; + stringMap.Each(hash.Add); + + Assert.That(hash.ContainsKey("two"), Is.True); + Assert.That(hash.ContainsKey("five"), Is.False); + } + + [Test] + public void Can_Remove_value_from_IDictionary_Hash() + { + var hash = Redis.Hashes[HashId]; + stringMap.Each(hash.Add); + + stringMap.Remove("two"); + hash.Remove("two"); + + var members = Redis.GetAllEntriesFromHash(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + private static Dictionary ToStringMap(Dictionary stringIntMap) + { + var map = new Dictionary(); + foreach (var kvp in stringIntMap) + { + map[kvp.Key] = kvp.Value.ToString(); + } + 
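// hash fields and values come back from HGETALL as strings, so the expected int map is converted to a string map before comparing +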
return map; + } + + [Test] + public void Can_increment_Hash_field() + { + var hash = Redis.Hashes[HashId]; + stringIntMap.Each(x => hash.Add(x.Key, x.Value.ToString())); + + stringIntMap["two"] += 10; + Redis.IncrementValueInHash(HashId, "two", 10); + + var members = Redis.GetAllEntriesFromHash(HashId); + Assert.That(members, Is.EquivalentTo(ToStringMap(stringIntMap))); + } + + [Test] + public void Can_increment_Hash_field_beyond_32_bits() + { + Redis.SetEntryInHash(HashId, "int", Int32.MaxValue.ToString()); + Redis.IncrementValueInHash(HashId, "int", 1); + long actual = Int64.Parse(Redis.GetValueFromHash(HashId, "int")); + long expected = Int32.MaxValue + 1L; + Assert.That(actual, Is.EqualTo(expected)); + } + + [Test] + public void Can_SetItemInHashIfNotExists() + { + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + + Redis.SetEntryInHashIfNotExists(HashId, "two", "did not change existing item"); + Redis.SetEntryInHashIfNotExists(HashId, "five", "changed non existing item"); + stringMap["five"] = "changed non existing item"; + + var members = Redis.GetAllEntriesFromHash(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public void Can_SetRangeInHash() + { + var newStringMap = new Dictionary { + {"five","e"}, {"six","f"}, {"seven","g"} + }; + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + + Redis.SetRangeInHash(HashId, newStringMap); + + newStringMap.Each(x => stringMap.Add(x.Key, x.Value)); + + var members = Redis.GetAllEntriesFromHash(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public void Can_GetItemsFromHash() + { + stringMap.Each(x => Redis.SetEntryInHash(HashId, x.Key, x.Value)); + + var expectedValues = new List { stringMap["one"], stringMap["two"], null }; + var hashValues = Redis.GetValuesFromHash(HashId, "one", "two", "not-exists"); + + Assert.That(hashValues.EquivalentTo(expectedValues), Is.True); + } [Test] public void Can_hash_set() { var key = HashId + "key"; var field = GetBytes("foo"); var value = GetBytes("value"); - Assert.AreEqual(Redis.HDel(key, field),0); - Assert.AreEqual(Redis.HSet(key, field, value),1); - Assert.AreEqual(Redis.HDel(key, field),1); + Assert.AreEqual(Redis.HDel(key, field), 0); + Assert.AreEqual(Redis.HSet(key, field, value), 1); + Assert.AreEqual(Redis.HDel(key, field), 1); } [Test] @@ -247,8 +260,8 @@ public void Can_hash_multi_set_and_get() { const string Key = HashId + "multitest"; Assert.That(Redis.GetValue(Key), Is.Null); - var fields = new Dictionary { {"field1", "1"},{"field2","2"}, {"field3","3"} }; - + var fields = new Dictionary { { "field1", "1" }, { "field2", "2" }, { "field3", "3" } }; + Redis.SetRangeInHash(Key, fields); var members = Redis.GetAllEntriesFromHash(Key); foreach (var member in members) @@ -258,6 +271,35 @@ public void Can_hash_multi_set_and_get() } } - } + public class HashTest + { + public int Id { get; set; } + public string Name { get; set; } + } + + [Test] + public void Can_store_as_Hash() + { + var dto = new HashTest { Id = 1 }; + Redis.StoreAsHash(dto); + + var storedHash = Redis.GetHashKeys(dto.ToUrn()); + Assert.That(storedHash, Is.EquivalentTo(new[] { "Id" })); + + var hold = RedisClient.ConvertToHashFn; + RedisClient.ConvertToHashFn = o => + { + var map = new Dictionary(); + o.ToObjectDictionary().Each(x => map[x.Key] = (x.Value ?? 
"").ToJsv()); + return map; + }; + + Redis.StoreAsHash(dto); + storedHash = Redis.GetHashKeys(dto.ToUrn()); + Assert.That(storedHash, Is.EquivalentTo(new[] { "Id", "Name" })); + + RedisClient.ConvertToHashFn = hold; + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientListTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientListTests.Async.cs new file mode 100644 index 00000000..b68445a2 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientListTests.Async.cs @@ -0,0 +1,502 @@ +using System; +using System.Collections.Generic; +using NUnit.Framework; +using System.Linq; +using ServiceStack.Text; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisClientListTestsAsync + : RedisClientTestsBaseAsync + { + const string ListId = "rcl_testlist"; + const string ListId2 = "rcl_testlist2"; + private List storeMembers; + + public RedisClientListTestsAsync() + { + CleanMask = "rcl_testlist*"; + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + storeMembers = new List { "one", "two", "three", "four" }; + } + + private static void AssertAreEqual(List actualList, List expectedList) + { + Assert.That(actualList, Has.Count.EqualTo(expectedList.Count)); + var i = 0; + actualList.ForEach(x => Assert.That(x, Is.EqualTo(expectedList[i++]))); + } + + private static void AssertAreEqual(List actualList, Queue expectedList) + { + Assert.That(actualList, Has.Count.EqualTo(expectedList.Count)); + actualList.ForEach(x => Assert.That(x, Is.EqualTo(expectedList.Dequeue()))); + } + + [Test] + public async Task Can_PopAndPushItemBetweenLists() + { + await RedisAsync.AddItemToListAsync(ListId, "1"); + await RedisAsync.PopAndPushItemBetweenListsAsync(ListId, ListId2); + } + + [Test] + public async Task Can_BlockingPopAndPushItemBetweenLists() + { + await RedisAsync.AddItemToListAsync(ListId, "A"); + await RedisAsync.AddItemToListAsync(ListId, "B"); + var r = await RedisAsync.BlockingPopAndPushItemBetweenListsAsync(ListId, ListId2, new TimeSpan(0, 0, 1)); + + Assert.That(r, Is.EqualTo("B")); + } + + [Test] + public async Task Can_Timeout_BlockingPopAndPushItemBetweenLists() + { + var r = await RedisAsync.BlockingPopAndPushItemBetweenListsAsync(ListId, ListId2, new TimeSpan(0, 0, 1)); + Assert.That(r, Is.Null); + } + + [Test] + public async Task Can_AddToList_and_GetAllFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var members = await RedisAsync.GetAllItemsFromListAsync(ListId); + + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_AddRangeToList_and_GetAllFromList() + { + await RedisAsync.AddRangeToListAsync(ListId, storeMembers); + + var members = await RedisAsync.GetAllItemsFromListAsync(ListId); + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_PrependRangeToList_and_GetAllFromList() + { + await RedisAsync.PrependRangeToListAsync(ListId, storeMembers); + + var members = await RedisAsync.GetAllItemsFromListAsync(ListId); + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_GetListCount() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var listCount = await RedisAsync.GetListCountAsync(ListId); + + Assert.That(listCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Can_GetItemFromList() + { + foreach (var x in storeMembers) + { + await 
RedisAsync.AddItemToListAsync(ListId, x); + } + + var storeMember3 = storeMembers[2]; + var item3 = await RedisAsync.GetItemFromListAsync(ListId, 2); + + Assert.That(item3, Is.EqualTo(storeMember3)); + } + + [Test] + public async Task Can_SetItemInList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + storeMembers[2] = "five"; + await RedisAsync.SetItemInListAsync(ListId, 2, "five"); + + var members = await RedisAsync.GetAllItemsFromListAsync(ListId); + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_PopFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var item4 = await RedisAsync.PopItemFromListAsync(ListId); + + Assert.That(item4, Is.EqualTo("four")); + } + + [Test] + public async Task Can_EnqueueOnList() + { + var queue = new Queue(); + storeMembers.ForEach(queue.Enqueue); + foreach (var x in storeMembers) + { + await RedisAsync.EnqueueItemOnListAsync(ListId, x); + } + + while (queue.Count > 0) + { + var actual = await RedisAsync.DequeueItemFromListAsync(ListId); + Assert.That(actual, Is.EqualTo(queue.Dequeue())); + } + } + + [Test] + public async Task Can_DequeueFromList() + { + var queue = new Queue(); + storeMembers.ForEach(queue.Enqueue); + foreach (var x in storeMembers) + { + await RedisAsync.EnqueueItemOnListAsync(ListId, x); + } + + var item1 = await RedisAsync.DequeueItemFromListAsync(ListId); + + Assert.That(item1, Is.EqualTo(queue.Dequeue())); + } + + [Test] + public async Task PopAndPushSameAsDequeue() + { + var queue = new Queue(); + storeMembers.ForEach(queue.Enqueue); + foreach (var x in storeMembers) + { + await RedisAsync.EnqueueItemOnListAsync(ListId, x); + } + + var item1 = await RedisAsync.PopAndPushItemBetweenListsAsync(ListId, ListId2); + Assert.That(item1, Is.EqualTo(queue.Dequeue())); + } + + [Test] + public async Task Can_BlockingDequeueFromList() + { + var queue = new Queue(); + storeMembers.ForEach(queue.Enqueue); + foreach (var x in storeMembers) + { + await RedisAsync.EnqueueItemOnListAsync(ListId, x); + } + + var item1 = await RedisAsync.BlockingDequeueItemFromListAsync(ListId, null); + + Assert.That(item1, Is.EqualTo(queue.Dequeue())); + } + + [Test] + public async Task BlockingDequeueFromList_Can_TimeOut() + { + var item1 = await RedisAsync.BlockingDequeueItemFromListAsync(ListId, TimeSpan.FromSeconds(1)); + Assert.That(item1, Is.Null); + } + + [Test] + public async Task Can_PushToList() + { + var stack = new Stack(); + storeMembers.ForEach(stack.Push); + foreach (var x in storeMembers) + { + await RedisAsync.PushItemToListAsync(ListId, x); + } + + while (stack.Count > 0) + { + var actual = await RedisAsync.PopItemFromListAsync(ListId); + Assert.That(actual, Is.EqualTo(stack.Pop())); + } + } + + [Test] + public async Task Can_BlockingPopFromList() + { + var stack = new Stack(); + storeMembers.ForEach(stack.Push); + foreach (var x in storeMembers) + { + await RedisAsync.PushItemToListAsync(ListId, x); + } + + var item1 = await RedisAsync.BlockingPopItemFromListAsync(ListId, null); + + Assert.That(item1, Is.EqualTo(stack.Pop())); + } + + [Test] + public async Task BlockingPopFromList_Can_TimeOut() + { + var item1 = await RedisAsync.BlockingPopItemFromListAsync(ListId, TimeSpan.FromSeconds(1)); + Assert.That(item1, Is.Null); + } + + [Test] + public async Task Can_RemoveStartFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var item1 = await 
RedisAsync.RemoveStartFromListAsync(ListId); + + Assert.That(item1, Is.EqualTo(storeMembers.First())); + } + + [Test] + public async Task Can_RemoveEndFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var item1 = await RedisAsync.RemoveEndFromListAsync(ListId); + + Assert.That(item1, Is.EqualTo(storeMembers.Last())); + } + + [Test] + public async Task Can_BlockingRemoveStartFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var item1 = await RedisAsync.BlockingRemoveStartFromListAsync(ListId, null); + + Assert.That(item1, Is.EqualTo(storeMembers.First())); + } + + [Test] + public async Task Can_MoveBetweenLists() + { + var list1Members = new List { "one", "two", "three", "four" }; + var list2Members = new List { "five", "six", "seven" }; + const string item4 = "four"; + + foreach (var x in list1Members) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + foreach (var x in list2Members) + { + await RedisAsync.AddItemToListAsync(ListId2, x); + } + + list1Members.Remove(item4); + list2Members.Insert(0, item4); + await RedisAsync.PopAndPushItemBetweenListsAsync(ListId, ListId2); + + var readList1 = await RedisAsync.GetAllItemsFromListAsync(ListId); + var readList2 = await RedisAsync.GetAllItemsFromListAsync(ListId2); + + AssertAreEqual(readList1, list1Members); + AssertAreEqual(readList2, list2Members); + } + + + [Test] + public async Task Can_enumerate_small_list() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var readMembers = new List(); + await foreach (var item in RedisAsync.Lists[ListId]) + { + readMembers.Add(item); + } + AssertAreEqual(readMembers, storeMembers); + } + + [Test] + public async Task Can_enumerate_large_list() + { + if (TestConfig.IgnoreLongTests) return; + + const int listSize = 2500; + + storeMembers = new List(); + for (int x = 0; x < listSize; x++) + { + await RedisAsync.AddItemToListAsync(ListId, x.ToString()); + storeMembers.Add(x.ToString()); + } + + var members = new List(); + await foreach (var item in RedisAsync.Lists[ListId]) + { + members.Add(item); + } + members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Add_to_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + var members = await ToListAsync(list); + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_Clear_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + Assert.That(await list.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await list.ClearAsync(); + + Assert.That(await list.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + Assert.That(await list.ContainsAsync("two"), Is.True); + Assert.That(await list.ContainsAsync("five"), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + storeMembers.Remove("two"); + await list.RemoveAsync("two"); + + var members = await ToListAsync(list); + + 
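// "two" was removed from both the local copy and the Redis-backed list, so the remaining items should match +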
AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_RemoveAt_value_from_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + storeMembers.RemoveAt(2); + await list.RemoveAtAsync(2); + + var members = await ToListAsync(list); + + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_get_default_index_from_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + for (var i = 0; i < storeMembers.Count; i++) + { + Assert.That(await list.ElementAtAsync(i), Is.EqualTo(storeMembers[i])); + } + } + + [Test] + public async Task Can_test_for_IndexOf_in_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + foreach (var item in storeMembers) + { + Assert.That(await list.IndexOfAsync(item), Is.EqualTo(storeMembers.IndexOf(item))); + } + } + + [Test] + public async Task Can_AddRangeToList_and_GetSortedItems() + { + await RedisAsync.PrependRangeToListAsync(ListId, storeMembers); + + var members = await RedisAsync.GetSortedItemsFromListAsync(ListId, new SortOptions { SortAlpha = true, SortDesc = true, Skip = 1, Take = 2 }); + AssertAreEqual(members, storeMembers.OrderByDescending(s => s).Skip(1).Take(2).ToList()); + } + + public class Test + { + public string A { get; set; } + } + + [Test] + public async Task RemoveAll_removes_all_items_from_Named_List() + { + var redis = RedisAsync.As(); + + var clientesRepo = redis.Lists["repo:Client:Test"]; + + Assert.IsTrue(await clientesRepo.CountAsync() == 0, "Count 1 = " + await clientesRepo.CountAsync()); + await clientesRepo.AddAsync(new Test() { A = "Test" }); + Assert.IsTrue(await clientesRepo.CountAsync() == 1, "Count 2 = " + await clientesRepo.CountAsync()); + await clientesRepo.RemoveAllAsync(); + Assert.IsTrue(await clientesRepo.CountAsync() == 0, "Count 3 = " + await clientesRepo.CountAsync()); + } + + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientListTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientListTests.cs index ab43133b..e7003a4b 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientListTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientListTests.cs @@ -1,8 +1,8 @@ using System; using System.Collections.Generic; using NUnit.Framework; -using ServiceStack.Common.Extensions; using System.Linq; +using ServiceStack.Text; namespace ServiceStack.Redis.Tests { @@ -35,7 +35,6 @@ private static void AssertAreEqual(List actualList, List expecte private static void AssertAreEqual(List actualList, Queue expectedList) { Assert.That(actualList, Has.Count.EqualTo(expectedList.Count)); - var i = 0; actualList.ForEach(x => Assert.That(x, Is.EqualTo(expectedList.Dequeue()))); } diff --git a/tests/ServiceStack.Redis.Tests/RedisClientSetTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientSetTests.Async.cs new file mode 100644 index 00000000..ad23cc5a --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientSetTests.Async.cs @@ -0,0 +1,335 @@ +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisClientSetTestsAsync + : RedisClientTestsBaseAsync + { + private const string SetIdSuffix = "testset"; + private List storeMembers; + + private string SetId + { + get + { + return 
this.PrefixedKey(SetIdSuffix); + } + } + + [SetUp] + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = "RedisClientSetTests"; + storeMembers = new List { "one", "two", "three", "four" }; + } + + [Test] + public async Task Can_AddToSet_and_GetAllFromSet() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var members = await RedisAsync.GetAllItemsFromSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_AddRangeToSet_and_GetAllFromSet() + { + await RedisAsync.AddRangeToSetAsync(SetId, storeMembers); + + var members = await RedisAsync.GetAllItemsFromSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_RemoveFromSet() + { + const string removeMember = "two"; + + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + await RedisAsync.RemoveItemFromSetAsync(SetId, removeMember); + + storeMembers.Remove(removeMember); + + var members = await RedisAsync.GetAllItemsFromSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_PopFromSet() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var member = await RedisAsync.PopItemFromSetAsync(SetId); + + Assert.That(storeMembers.Contains(member), Is.True); + } + + [Test] + public async Task Can_MoveBetweenSets() + { + string fromSetId = PrefixedKey("testmovefromset"); + string toSetId = PrefixedKey("testmovetoset"); + const string moveMember = "four"; + var fromSetIdMembers = new List { "one", "two", "three", "four" }; + var toSetIdMembers = new List { "five", "six", "seven" }; + + await fromSetIdMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(fromSetId, x)); + await toSetIdMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(toSetId, x)); + + await RedisAsync.MoveBetweenSetsAsync(fromSetId, toSetId, moveMember); + + fromSetIdMembers.Remove(moveMember); + toSetIdMembers.Add(moveMember); + + var readFromSetId = await RedisAsync.GetAllItemsFromSetAsync(fromSetId); + var readToSetId = await RedisAsync.GetAllItemsFromSetAsync(toSetId); + + Assert.That(readFromSetId, Is.EquivalentTo(fromSetIdMembers)); + Assert.That(readToSetId, Is.EquivalentTo(toSetIdMembers)); + } + + [Test] + public async Task Can_GetSetCount() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var setCount = await RedisAsync.GetSetCountAsync(SetId); + + Assert.That(setCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Does_SetContainsValue() + { + const string existingMember = "two"; + const string nonExistingMember = "five"; + + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + Assert.That(await RedisAsync.SetContainsItemAsync(SetId, existingMember), Is.True); + Assert.That(await RedisAsync.SetContainsItemAsync(SetId, nonExistingMember), Is.False); + } + + [Test] + public async Task Can_IntersectBetweenSets() + { + string set1Name = PrefixedKey("testintersectset1"); + string set2Name = PrefixedKey("testintersectset2"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + + var intersectingMembers = await 
RedisAsync.GetIntersectFromSetsAsync(new[] { set1Name, set2Name }); + + Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); + } + + [Test] + public async Task Can_Store_IntersectBetweenSets() + { + string set1Name = PrefixedKey("testintersectset1"); + string set2Name = PrefixedKey("testintersectset2"); + string storeSetName = PrefixedKey("testintersectsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + + await RedisAsync.StoreIntersectFromSetsAsync(storeSetName, new[] { set1Name, set2Name }); + + var intersectingMembers = await RedisAsync.GetAllItemsFromSetAsync(storeSetName); + + Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); + } + + [Test] + public async Task Can_UnionBetweenSets() + { + string set1Name = PrefixedKey("testunionset1"); + string set2Name = PrefixedKey("testunionset2"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + + var unionMembers = await RedisAsync.GetUnionFromSetsAsync(new[] { set1Name, set2Name }); + + Assert.That(unionMembers, Is.EquivalentTo( + new List { "one", "two", "three", "four", "five", "six", "seven" })); + } + + [Test] + public async Task Can_Store_UnionBetweenSets() + { + string set1Name = PrefixedKey("testunionset1"); + string set2Name = PrefixedKey("testunionset2"); + string storeSetName = PrefixedKey("testunionsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + + await RedisAsync.StoreUnionFromSetsAsync(storeSetName, new[] { set1Name, set2Name }); + + var unionMembers = await RedisAsync.GetAllItemsFromSetAsync(storeSetName); + + Assert.That(unionMembers, Is.EquivalentTo( + new List { "one", "two", "three", "four", "five", "six", "seven" })); + } + + [Test] + public async Task Can_DiffBetweenSets() + { + string set1Name = PrefixedKey("testdiffset1"); + string set2Name = PrefixedKey("testdiffset2"); + string set3Name = PrefixedKey("testdiffset3"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + var set3Members = new List { "one", "five", "seven", "eleven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + await set3Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set3Name, x)); + + var diffMembers = await RedisAsync.GetDifferencesFromSetAsync(set1Name, new[] { set2Name, set3Name }); + + Assert.That(diffMembers, Is.EquivalentTo( + new List { "two", "three" })); + } + + [Test] + public async Task Can_Store_DiffBetweenSets() + { + string set1Name = PrefixedKey("testdiffset1"); + string set2Name = PrefixedKey("testdiffset2"); + string set3Name = PrefixedKey("testdiffset3"); + string storeSetName = 
PrefixedKey("testdiffsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + var set3Members = new List { "one", "five", "seven", "eleven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + await set3Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set3Name, x)); + + await RedisAsync.StoreDifferencesFromSetAsync(storeSetName, set1Name, new[] { set2Name, set3Name }); + + var diffMembers = await RedisAsync.GetAllItemsFromSetAsync(storeSetName); + + Assert.That(diffMembers, Is.EquivalentTo( + new List { "two", "three" })); + } + + [Test] + public async Task Can_GetRandomEntryFromSet() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var randomEntry = await RedisAsync.GetRandomItemFromSetAsync(SetId); + + Assert.That(storeMembers.Contains(randomEntry), Is.True); + } + + + [Test] + public async Task Can_enumerate_small_ICollection_Set() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var members = new List(); + await foreach (var item in RedisAsync.Sets[SetId]) + { + members.Add(item); + } + members.Sort(); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_enumerate_large_ICollection_Set() + { + if (TestConfig.IgnoreLongTests) return; + + const int setSize = 2500; + + storeMembers = new List(); + await setSize.TimesAsync(async x => + { + await RedisAsync.AddItemToSetAsync(SetId, x.ToString()); + storeMembers.Add(x.ToString()); + }); + + var members = new List(); + await foreach (var item in RedisAsync.Sets[SetId]) + { + members.Add(item); + } + members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Add_to_ICollection_Set() + { + var list = RedisAsync.Sets[SetId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + var members = await list.ToListAsync(); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Clear_ICollection_Set() + { + var list = RedisAsync.Sets[SetId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + Assert.That(await list.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await list.ClearAsync(); + + Assert.That(await list.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_ICollection_Set() + { + var list = RedisAsync.Sets[SetId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + Assert.That(await list.ContainsAsync("two"), Is.True); + Assert.That(await list.ContainsAsync("five"), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_ICollection_Set() + { + var list = RedisAsync.Sets[SetId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + storeMembers.Remove("two"); + await list.RemoveAsync("two"); + + var members = await list.ToListAsync(); + + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientSetTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientSetTests.cs index 90cea83e..28093968 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientSetTests.cs +++ 
b/tests/ServiceStack.Redis.Tests/RedisClientSetTests.cs @@ -1,333 +1,334 @@ using System.Collections.Generic; +using System.Linq; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Text; namespace ServiceStack.Redis.Tests { - [TestFixture] - public class RedisClientSetTests - : RedisClientTestsBase - { - private const string SetIdSuffix = "testset"; - private List storeMembers; - - private string SetId - { - get - { - return this.PrefixedKey(SetIdSuffix); - } - } - - [SetUp] - public override void OnBeforeEachTest() - { - base.OnBeforeEachTest(); - Redis.NamespacePrefix = "RedisClientSetTests"; - storeMembers = new List { "one", "two", "three", "four" }; - } - - [Test] - public void Can_AddToSet_and_GetAllFromSet() - { - storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); - - var members = Redis.GetAllItemsFromSet(SetId); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_AddRangeToSet_and_GetAllFromSet() - { - Redis.AddRangeToSet(SetId, storeMembers); - - var members = Redis.GetAllItemsFromSet(SetId); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_RemoveFromSet() - { - const string removeMember = "two"; - - storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); - - Redis.RemoveItemFromSet(SetId, removeMember); - - storeMembers.Remove(removeMember); - - var members = Redis.GetAllItemsFromSet(SetId); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_PopFromSet() - { - storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); - - var member = Redis.PopItemFromSet(SetId); - - Assert.That(storeMembers.Contains(member), Is.True); - } - - [Test] - public void Can_MoveBetweenSets() - { - string fromSetId = PrefixedKey("testmovefromset"); - string toSetId = PrefixedKey("testmovetoset"); - const string moveMember = "four"; - var fromSetIdMembers = new List { "one", "two", "three", "four" }; - var toSetIdMembers = new List { "five", "six", "seven" }; - - fromSetIdMembers.ForEach(x => Redis.AddItemToSet(fromSetId, x)); - toSetIdMembers.ForEach(x => Redis.AddItemToSet(toSetId, x)); - - Redis.MoveBetweenSets(fromSetId, toSetId, moveMember); - - fromSetIdMembers.Remove(moveMember); - toSetIdMembers.Add(moveMember); - - var readFromSetId = Redis.GetAllItemsFromSet(fromSetId); - var readToSetId = Redis.GetAllItemsFromSet(toSetId); - - Assert.That(readFromSetId, Is.EquivalentTo(fromSetIdMembers)); - Assert.That(readToSetId, Is.EquivalentTo(toSetIdMembers)); - } - - [Test] - public void Can_GetSetCount() - { - storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); - - var setCount = Redis.GetSetCount(SetId); - - Assert.That(setCount, Is.EqualTo(storeMembers.Count)); - } - - [Test] - public void Does_SetContainsValue() - { - const string existingMember = "two"; - const string nonExistingMember = "five"; - - storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); - - Assert.That(Redis.SetContainsItem(SetId, existingMember), Is.True); - Assert.That(Redis.SetContainsItem(SetId, nonExistingMember), Is.False); - } - - [Test] - public void Can_IntersectBetweenSets() - { - string set1Name = PrefixedKey("testintersectset1"); - string set2Name = PrefixedKey("testintersectset2"); - var set1Members = new List { "one", "two", "three", "four", "five" }; - var set2Members = new List { "four", "five", "six", "seven" }; - - set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); - set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); + [TestFixture] + public 
class RedisClientSetTests + : RedisClientTestsBase + { + private const string SetIdSuffix = "testset"; + private List storeMembers; + + private string SetId + { + get + { + return this.PrefixedKey(SetIdSuffix); + } + } + + [SetUp] + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + Redis.NamespacePrefix = "RedisClientSetTests"; + storeMembers = new List { "one", "two", "three", "four" }; + } + + [Test] + public void Can_AddToSet_and_GetAllFromSet() + { + storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); + + var members = Redis.GetAllItemsFromSet(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_AddRangeToSet_and_GetAllFromSet() + { + Redis.AddRangeToSet(SetId, storeMembers); + + var members = Redis.GetAllItemsFromSet(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_RemoveFromSet() + { + const string removeMember = "two"; + + storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); + + Redis.RemoveItemFromSet(SetId, removeMember); + + storeMembers.Remove(removeMember); + + var members = Redis.GetAllItemsFromSet(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_PopFromSet() + { + storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); + + var member = Redis.PopItemFromSet(SetId); + + Assert.That(storeMembers.Contains(member), Is.True); + } + + [Test] + public void Can_MoveBetweenSets() + { + string fromSetId = PrefixedKey("testmovefromset"); + string toSetId = PrefixedKey("testmovetoset"); + const string moveMember = "four"; + var fromSetIdMembers = new List { "one", "two", "three", "four" }; + var toSetIdMembers = new List { "five", "six", "seven" }; + + fromSetIdMembers.ForEach(x => Redis.AddItemToSet(fromSetId, x)); + toSetIdMembers.ForEach(x => Redis.AddItemToSet(toSetId, x)); + + Redis.MoveBetweenSets(fromSetId, toSetId, moveMember); + + fromSetIdMembers.Remove(moveMember); + toSetIdMembers.Add(moveMember); + + var readFromSetId = Redis.GetAllItemsFromSet(fromSetId); + var readToSetId = Redis.GetAllItemsFromSet(toSetId); + + Assert.That(readFromSetId, Is.EquivalentTo(fromSetIdMembers)); + Assert.That(readToSetId, Is.EquivalentTo(toSetIdMembers)); + } + + [Test] + public void Can_GetSetCount() + { + storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); + + var setCount = Redis.GetSetCount(SetId); + + Assert.That(setCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public void Does_SetContainsValue() + { + const string existingMember = "two"; + const string nonExistingMember = "five"; + + storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); + + Assert.That(Redis.SetContainsItem(SetId, existingMember), Is.True); + Assert.That(Redis.SetContainsItem(SetId, nonExistingMember), Is.False); + } + + [Test] + public void Can_IntersectBetweenSets() + { + string set1Name = PrefixedKey("testintersectset1"); + string set2Name = PrefixedKey("testintersectset2"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); + set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); - var intersectingMembers = Redis.GetIntersectFromSets(set1Name, set2Name); + var intersectingMembers = Redis.GetIntersectFromSets(set1Name, set2Name); - Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); - } + Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", 
"five" })); + } - [Test] - public void Can_Store_IntersectBetweenSets() - { - string set1Name = PrefixedKey("testintersectset1"); - string set2Name = PrefixedKey("testintersectset2"); - string storeSetName = PrefixedKey("testintersectsetstore"); - var set1Members = new List { "one", "two", "three", "four", "five" }; - var set2Members = new List { "four", "five", "six", "seven" }; + [Test] + public void Can_Store_IntersectBetweenSets() + { + string set1Name = PrefixedKey("testintersectset1"); + string set2Name = PrefixedKey("testintersectset2"); + string storeSetName = PrefixedKey("testintersectsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; - set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); - set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); + set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); + set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); - Redis.StoreIntersectFromSets(storeSetName, set1Name, set2Name); + Redis.StoreIntersectFromSets(storeSetName, set1Name, set2Name); - var intersectingMembers = Redis.GetAllItemsFromSet(storeSetName); + var intersectingMembers = Redis.GetAllItemsFromSet(storeSetName); - Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); - } + Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); + } - [Test] - public void Can_UnionBetweenSets() - { - string set1Name = PrefixedKey("testunionset1"); - string set2Name = PrefixedKey("testunionset2"); - var set1Members = new List { "one", "two", "three", "four", "five" }; - var set2Members = new List { "four", "five", "six", "seven" }; - - set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); - set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); - - var unionMembers = Redis.GetUnionFromSets(set1Name, set2Name); + [Test] + public void Can_UnionBetweenSets() + { + string set1Name = PrefixedKey("testunionset1"); + string set2Name = PrefixedKey("testunionset2"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); + set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); + + var unionMembers = Redis.GetUnionFromSets(set1Name, set2Name); - Assert.That(unionMembers, Is.EquivalentTo( - new List { "one", "two", "three", "four", "five", "six", "seven" })); - } - - [Test] - public void Can_Store_UnionBetweenSets() - { - string set1Name = PrefixedKey("testunionset1"); - string set2Name = PrefixedKey("testunionset2"); - string storeSetName = PrefixedKey("testunionsetstore"); - var set1Members = new List { "one", "two", "three", "four", "five" }; - var set2Members = new List { "four", "five", "six", "seven" }; - - set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); - set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); - - Redis.StoreUnionFromSets(storeSetName, set1Name, set2Name); - - var unionMembers = Redis.GetAllItemsFromSet(storeSetName); - - Assert.That(unionMembers, Is.EquivalentTo( - new List { "one", "two", "three", "four", "five", "six", "seven" })); - } - - [Test] - public void Can_DiffBetweenSets() - { - string set1Name = PrefixedKey("testdiffset1"); - string set2Name = PrefixedKey("testdiffset2"); - string set3Name = PrefixedKey("testdiffset3"); - var set1Members = new List { "one", "two", "three", "four", "five" }; - var set2Members = new List { "four", "five", 
"six", "seven" }; - var set3Members = new List { "one", "five", "seven", "eleven" }; - - set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); - set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); - set3Members.ForEach(x => Redis.AddItemToSet(set3Name, x)); - - var diffMembers = Redis.GetDifferencesFromSet(set1Name, set2Name, set3Name); - - Assert.That(diffMembers, Is.EquivalentTo( - new List { "two", "three" })); - } - - [Test] - public void Can_Store_DiffBetweenSets() - { - string set1Name = PrefixedKey("testdiffset1"); - string set2Name = PrefixedKey("testdiffset2"); - string set3Name = PrefixedKey("testdiffset3"); - string storeSetName = PrefixedKey("testdiffsetstore"); - var set1Members = new List { "one", "two", "three", "four", "five" }; - var set2Members = new List { "four", "five", "six", "seven" }; - var set3Members = new List { "one", "five", "seven", "eleven" }; - - set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); - set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); - set3Members.ForEach(x => Redis.AddItemToSet(set3Name, x)); - - Redis.StoreDifferencesFromSet(storeSetName, set1Name, set2Name, set3Name); - - var diffMembers = Redis.GetAllItemsFromSet(storeSetName); - - Assert.That(diffMembers, Is.EquivalentTo( - new List { "two", "three" })); - } - - [Test] - public void Can_GetRandomEntryFromSet() - { - storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); - - var randomEntry = Redis.GetRandomItemFromSet(SetId); - - Assert.That(storeMembers.Contains(randomEntry), Is.True); - } - - - [Test] - public void Can_enumerate_small_ICollection_Set() - { - storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); - - var members = new List(); - foreach (var item in Redis.Sets[SetId]) - { - members.Add(item); - } - members.Sort(); - Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_enumerate_large_ICollection_Set() - { - if (TestConfig.IgnoreLongTests) return; - - const int setSize = 2500; - - storeMembers = new List(); - setSize.Times(x => - { - Redis.AddItemToSet(SetId, x.ToString()); - storeMembers.Add(x.ToString()); - }); - - var members = new List(); - foreach (var item in Redis.Sets[SetId]) - { - members.Add(item); - } - members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); - Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_Add_to_ICollection_Set() - { - var list = Redis.Sets[SetId]; - storeMembers.ForEach(list.Add); - - var members = list.ToList(); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_Clear_ICollection_Set() - { - var list = Redis.Sets[SetId]; - storeMembers.ForEach(list.Add); - - Assert.That(list.Count, Is.EqualTo(storeMembers.Count)); - - list.Clear(); - - Assert.That(list.Count, Is.EqualTo(0)); - } - - [Test] - public void Can_Test_Contains_in_ICollection_Set() - { - var list = Redis.Sets[SetId]; - storeMembers.ForEach(list.Add); - - Assert.That(list.Contains("two"), Is.True); - Assert.That(list.Contains("five"), Is.False); - } - - [Test] - public void Can_Remove_value_from_ICollection_Set() - { - var list = Redis.Sets[SetId]; - storeMembers.ForEach(list.Add); - - storeMembers.Remove("two"); - list.Remove("two"); + Assert.That(unionMembers, Is.EquivalentTo( + new List { "one", "two", "three", "four", "five", "six", "seven" })); + } + + [Test] + public void Can_Store_UnionBetweenSets() + { + 
string set1Name = PrefixedKey("testunionset1"); + string set2Name = PrefixedKey("testunionset2"); + string storeSetName = PrefixedKey("testunionsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); + set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); + + Redis.StoreUnionFromSets(storeSetName, set1Name, set2Name); + + var unionMembers = Redis.GetAllItemsFromSet(storeSetName); + + Assert.That(unionMembers, Is.EquivalentTo( + new List { "one", "two", "three", "four", "five", "six", "seven" })); + } + + [Test] + public void Can_DiffBetweenSets() + { + string set1Name = PrefixedKey("testdiffset1"); + string set2Name = PrefixedKey("testdiffset2"); + string set3Name = PrefixedKey("testdiffset3"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + var set3Members = new List { "one", "five", "seven", "eleven" }; + + set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); + set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); + set3Members.ForEach(x => Redis.AddItemToSet(set3Name, x)); + + var diffMembers = Redis.GetDifferencesFromSet(set1Name, set2Name, set3Name); + + Assert.That(diffMembers, Is.EquivalentTo( + new List { "two", "three" })); + } + + [Test] + public void Can_Store_DiffBetweenSets() + { + string set1Name = PrefixedKey("testdiffset1"); + string set2Name = PrefixedKey("testdiffset2"); + string set3Name = PrefixedKey("testdiffset3"); + string storeSetName = PrefixedKey("testdiffsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + var set3Members = new List { "one", "five", "seven", "eleven" }; + + set1Members.ForEach(x => Redis.AddItemToSet(set1Name, x)); + set2Members.ForEach(x => Redis.AddItemToSet(set2Name, x)); + set3Members.ForEach(x => Redis.AddItemToSet(set3Name, x)); + + Redis.StoreDifferencesFromSet(storeSetName, set1Name, set2Name, set3Name); + + var diffMembers = Redis.GetAllItemsFromSet(storeSetName); + + Assert.That(diffMembers, Is.EquivalentTo( + new List { "two", "three" })); + } + + [Test] + public void Can_GetRandomEntryFromSet() + { + storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); + + var randomEntry = Redis.GetRandomItemFromSet(SetId); + + Assert.That(storeMembers.Contains(randomEntry), Is.True); + } + + + [Test] + public void Can_enumerate_small_ICollection_Set() + { + storeMembers.ForEach(x => Redis.AddItemToSet(SetId, x)); + + var members = new List(); + foreach (var item in Redis.Sets[SetId]) + { + members.Add(item); + } + members.Sort(); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_enumerate_large_ICollection_Set() + { + if (TestConfig.IgnoreLongTests) return; + + const int setSize = 2500; + + storeMembers = new List(); + setSize.Times(x => + { + Redis.AddItemToSet(SetId, x.ToString()); + storeMembers.Add(x.ToString()); + }); + + var members = new List(); + foreach (var item in Redis.Sets[SetId]) + { + members.Add(item); + } + members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_Add_to_ICollection_Set() + { + var list = Redis.Sets[SetId]; + 
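// Redis.Sets[SetId] wraps the server-side set in a collection adapter, so the
// Add/Contains/Remove/Count/Clear calls used in these tests issue set commands against
// Redis rather than mutating a local collection. Illustrative sketch (hypothetical key):
//   var tags = Redis.Sets["urn:tags"];
//   tags.Add("redis");
//   var hasTag = tags.Contains("redis");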
storeMembers.ForEach(list.Add); + + var members = list.ToList(); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_Clear_ICollection_Set() + { + var list = Redis.Sets[SetId]; + storeMembers.ForEach(list.Add); + + Assert.That(list.Count, Is.EqualTo(storeMembers.Count)); + + list.Clear(); + + Assert.That(list.Count, Is.EqualTo(0)); + } + + [Test] + public void Can_Test_Contains_in_ICollection_Set() + { + var list = Redis.Sets[SetId]; + storeMembers.ForEach(list.Add); + + Assert.That(list.Contains("two"), Is.True); + Assert.That(list.Contains("five"), Is.False); + } + + [Test] + public void Can_Remove_value_from_ICollection_Set() + { + var list = Redis.Sets[SetId]; + storeMembers.ForEach(list.Add); + + storeMembers.Remove("two"); + list.Remove("two"); - var members = list.ToList(); - - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - } + var members = list.ToList(); + + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.Async.cs new file mode 100644 index 00000000..ae31d2d3 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.Async.cs @@ -0,0 +1,454 @@ +using NUnit.Framework; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Globalization; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisClientSortedSetTestsAsync + : RedisClientTestsBaseAsync + { + private const string SetIdSuffix = "testzset"; + private List storeMembers; + + private string SetId + { + get + { + return PrefixedKey(SetIdSuffix); + } + } + + Dictionary stringDoubleMap; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = "RedisClientSortedSetTests"; + storeMembers = new List { "one", "two", "three", "four" }; + + stringDoubleMap = new Dictionary { + {"one",1}, {"two",2}, {"three",3}, {"four",4} + }; + } + + [Test] + public async Task Can_AddItemToSortedSet_and_GetAllFromSet() + { + var i = 0; + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, i++)); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + Assert.That(members.EquivalentTo(storeMembers), Is.True); + } + + [Test] + public async Task Can_AddRangeToSortedSet_and_GetAllFromSet() + { + var success = await RedisAsync.AddRangeToSortedSetAsync(SetId, storeMembers, 1); + Assert.That(success, Is.True); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task AddToSet_without_score_adds_an_implicit_lexical_order_score() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + Assert.That(members.EquivalentTo(storeMembers), Is.True); + } + + [Test] + public async Task AddToSet_with_same_score_is_still_returned_in_lexical_order_score() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, 1)); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + 
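// When every member shares the same score, Redis falls back to lexical ordering within
// that score, so sorting the expected values alphabetically ("four", "one", "three",
// "two") should match the order the server returns below.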
Assert.That(members.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_RemoveFromSet() + { + const string removeMember = "two"; + + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + await RedisAsync.RemoveItemFromSortedSetAsync(SetId, removeMember); + + storeMembers.Remove(removeMember); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_RemoveItemsFromSortedSet() + { + var removeMembers = new[] { "two" , "four", "six" }; + + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + var removeCount = await RedisAsync.RemoveItemsFromSortedSetAsync(SetId, removeMembers.ToList()); + Assert.That(removeCount, Is.EqualTo(2)); + + removeMembers.Each(x => storeMembers.Remove(x)); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_PopFromSet() + { + var i = 0; + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, i++)); + + var member = await RedisAsync.PopItemWithHighestScoreFromSortedSetAsync(SetId); + + Assert.That(member, Is.EqualTo("four")); + } + + [Test] + public async Task Can_GetSetCount() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + var setCount = await RedisAsync.GetSortedSetCountAsync(SetId); + + Assert.That(setCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Can_GetSetCountByScores() + { + var scores = new List(); + + await storeMembers.ForEachAsync(async x => + { + await RedisAsync.AddItemToSortedSetAsync(SetId, x); + scores.Add(RedisClient.GetLexicalScore(x)); + }); + + Assert.That(await RedisAsync.GetSortedSetCountAsync(SetId, scores.Min(), scores.Max()), Is.EqualTo(storeMembers.Count())); + Assert.That(await RedisAsync.GetSortedSetCountAsync(SetId, scores.Min(), scores.Min()), Is.EqualTo(1)); + } + + [Test] + public async Task Does_SortedSetContainsValue() + { + const string existingMember = "two"; + const string nonExistingMember = "five"; + + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + Assert.That(await RedisAsync.SortedSetContainsItemAsync(SetId, existingMember), Is.True); + Assert.That(await RedisAsync.SortedSetContainsItemAsync(SetId, nonExistingMember), Is.False); + } + + [Test] + public async Task Can_GetItemIndexInSortedSet_in_Asc_and_Desc() + { + var i = 10; + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, i++)); + + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "one"), Is.EqualTo(0)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "two"), Is.EqualTo(1)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "three"), Is.EqualTo(2)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "four"), Is.EqualTo(3)); + + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "one"), Is.EqualTo(3)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "two"), Is.EqualTo(2)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "three"), Is.EqualTo(1)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "four"), Is.EqualTo(0)); + } + + [Test] + public async Task 
Can_Store_IntersectBetweenSets() + { + string set1Name = PrefixedKey("testintersectset1"); + string set2Name = PrefixedKey("testintersectset2"); + string storeSetName = PrefixedKey("testintersectsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(set1Name, x)); + await set2Members.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(set2Name, x)); + + await RedisAsync.StoreIntersectFromSortedSetsAsync(storeSetName, new[] { set1Name, set2Name }); + + var intersectingMembers = await RedisAsync.GetAllItemsFromSortedSetAsync(storeSetName); + + Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); + } + + [Test] + public async Task Can_Store_UnionBetweenSets() + { + string set1Name = PrefixedKey("testunionset1"); + string set2Name = PrefixedKey("testunionset2"); + string storeSetName = PrefixedKey("testunionsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(set1Name, x)); + await set2Members.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(set2Name, x)); + + await RedisAsync.StoreUnionFromSortedSetsAsync(storeSetName, new[] { set1Name, set2Name }); + + var unionMembers = await RedisAsync.GetAllItemsFromSortedSetAsync(storeSetName); + + Assert.That(unionMembers, Is.EquivalentTo( + new List { "one", "two", "three", "four", "five", "six", "seven" })); + } + + [Test] + public async Task Can_pop_items_with_lowest_and_highest_scores_from_sorted_set() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + + var lowestScore = await RedisAsync.PopItemWithLowestScoreFromSortedSetAsync(SetId); + Assert.That(lowestScore, Is.EqualTo(storeMembers.First())); + + var highestScore = await RedisAsync.PopItemWithHighestScoreFromSortedSetAsync(SetId); + Assert.That(highestScore, Is.EqualTo(storeMembers[storeMembers.Count - 1])); + } + + [Test, Ignore("seems unstable?")] + public async Task Can_GetRangeFromSortedSetByLowestScore_from_sorted_set() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + var memberRage = storeMembers.Where(x => + x.CompareTo("four") >= 0 && x.CompareTo("three") <= 0).ToList(); + + var range = await RedisAsync.GetRangeFromSortedSetByLowestScoreAsync(SetId, "four", "three"); + Assert.That(range.EquivalentTo(memberRage)); + } + + [Test] + public async Task Can_IncrementItemInSortedSet() + { + await stringDoubleMap.ForEachAsync(async (k,v) => await RedisAsync.AddItemToSortedSetAsync(SetId, k, v)); + + var currentScore = await RedisAsync.IncrementItemInSortedSetAsync(SetId, "one", 3); + stringDoubleMap["one"] = stringDoubleMap["one"] + 3; + Assert.That(currentScore, Is.EqualTo(stringDoubleMap["one"])); + + currentScore = await RedisAsync.IncrementItemInSortedSetAsync(SetId, "four", -3); + stringDoubleMap["four"] = stringDoubleMap["four"] - 3; + Assert.That(currentScore, Is.EqualTo(stringDoubleMap["four"])); + + var map = await RedisAsync.GetAllWithScoresFromSortedSetAsync(SetId); + + Assert.That(stringDoubleMap.UnorderedEquivalentTo(map)); + } + + [Test] + public async Task 
Can_WorkInSortedSetUnderDifferentCulture() + { +#if NETCORE + var prevCulture = CultureInfo.CurrentCulture; + CultureInfo.CurrentCulture = new CultureInfo("ru-RU"); +#else + var prevCulture = Thread.CurrentThread.CurrentCulture; + Thread.CurrentThread.CurrentCulture = CultureInfo.CreateSpecificCulture("ru-RU"); +#endif + await RedisAsync.AddItemToSortedSetAsync(SetId, "key", 123.22); + + var map = await RedisAsync.GetAllWithScoresFromSortedSetAsync(SetId); + + Assert.AreEqual(123.22, map["key"]); + +#if NETCORE + CultureInfo.CurrentCulture = prevCulture; +#else + Thread.CurrentThread.CurrentCulture = prevCulture; +#endif + } + + + [Ignore("Not implemented yet")] + [Test] + public async Task Can_GetRangeFromSortedSetByHighestScore_from_sorted_set() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + storeMembers.Sort((x, y) => y.CompareTo(x)); + var memberRage = storeMembers.Where(x => + x.CompareTo("four") >= 0 && x.CompareTo("three") <= 0).ToList(); + + var range = await RedisAsync.GetRangeFromSortedSetByHighestScoreAsync(SetId, "four", "three"); + Assert.That(range.EquivalentTo(memberRage)); + } + + [Test] + public async Task Can_get_index_and_score_from_SortedSet() + { + storeMembers = new List { "a", "b", "c", "d" }; + const double initialScore = 10d; + var i = initialScore; + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, i++)); + + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "a"), Is.EqualTo(0)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "a"), Is.EqualTo(storeMembers.Count - 1)); + + Assert.That(await RedisAsync.GetItemScoreInSortedSetAsync(SetId, "a"), Is.EqualTo(initialScore)); + Assert.That(await RedisAsync.GetItemScoreInSortedSetAsync(SetId, "d"), Is.EqualTo(initialScore + storeMembers.Count - 1)); + } + + [Test] + public async Task Can_enumerate_small_ICollection_Set() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + var members = new List(); + await foreach (var item in RedisAsync.SortedSets[SetId]) + { + members.Add(item); + } + members.Sort(); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_enumerate_large_ICollection_Set() + { + if (TestConfig.IgnoreLongTests) return; + + const int setSize = 2500; + + storeMembers = new List(); + await setSize.TimesAsync(async x => + { + await RedisAsync.AddItemToSortedSetAsync(SetId, x.ToString()); + storeMembers.Add(x.ToString()); + }); + + var members = new List(); + await foreach (var item in RedisAsync.SortedSets[SetId]) + { + members.Add(item); + } + members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Add_to_ICollection_Set() + { + var list = RedisAsync.SortedSets[SetId]; + await storeMembers.ForEachAsync(async x => await list.AddAsync(x)); + + var members = await list.ToListAsync(); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Clear_ICollection_Set() + { + var list = RedisAsync.SortedSets[SetId]; + await storeMembers.ForEachAsync(async x => await list.AddAsync(x)); + + Assert.That(await list.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await list.ClearAsync(); + + Assert.That(await list.CountAsync(), 
Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_ICollection_Set() + { + var list = RedisAsync.SortedSets[SetId]; + await storeMembers.ForEachAsync(async x => await list.AddAsync(x)); + + Assert.That(await list.ContainsAsync("two"), Is.True); + Assert.That(await list.ContainsAsync("five"), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_ICollection_Set() + { + var list = RedisAsync.SortedSets[SetId]; + await storeMembers.ForEachAsync(async x => await list.AddAsync(x)); + + storeMembers.Remove("two"); + await list.RemoveAsync("two"); + + var members = await list.ToListAsync(); + + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Score_from_non_existent_item_returns_NaN() + { + var score = await RedisAsync.GetItemScoreInSortedSetAsync("nonexistentset", "value"); + + Assert.That(score, Is.EqualTo(Double.NaN)); + } + + [Test] + public async Task Can_add_large_score_to_sortedset() + { + await RedisAsync.AddItemToSortedSetAsync(SetId, "value", 12345678901234567890d); + var score = await RedisAsync.GetItemScoreInSortedSetAsync(SetId, "value"); + + Assert.That(score, Is.EqualTo(12345678901234567890d)); + } + + public class Article + { + public int Id { get; set; } + public string Title { get; set; } + public DateTime ModifiedDate { get; set; } + } + + [Test] + public async Task Can_use_SortedIndex_to_store_articles_by_Date() + { + var redisArticles = RedisAsync.As
(); + + var articles = new[] + { + new Article { Id = 1, Title = "Article 1", ModifiedDate = new DateTime(2015, 01, 02) }, + new Article { Id = 2, Title = "Article 2", ModifiedDate = new DateTime(2015, 01, 01) }, + new Article { Id = 3, Title = "Article 3", ModifiedDate = new DateTime(2015, 01, 03) }, + }; + + await redisArticles.StoreAllAsync(articles); + + const string LatestArticlesSet = "urn:Article:modified"; + + foreach (var article in articles) + { + await RedisAsync.AddItemToSortedSetAsync(LatestArticlesSet, article.Id.ToString(), article.ModifiedDate.Ticks); + } + + var articleIds = await RedisAsync.GetAllItemsFromSortedSetDescAsync(LatestArticlesSet); + articleIds.PrintDump(); + + var latestArticles = await redisArticles.GetByIdsAsync(articleIds); + latestArticles.PrintDump(); + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.cs index f0458d40..bfcb695d 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.cs @@ -5,390 +5,451 @@ using System.Linq; using System.Threading; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; using ServiceStack.Text; namespace ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class RedisClientSortedSetTests - : RedisClientTestsBase - { - private const string SetIdSuffix = "testzset"; - private List storeMembers; + [TestFixture, Category("Integration")] + public class RedisClientSortedSetTests + : RedisClientTestsBase + { + private const string SetIdSuffix = "testzset"; + private List storeMembers; + + private string SetId + { + get + { + return PrefixedKey(SetIdSuffix); + } + } - private string SetId - { - get - { - return PrefixedKey(SetIdSuffix); - } - } - - Dictionary stringDoubleMap; - - public override void OnBeforeEachTest() - { - base.OnBeforeEachTest(); - Redis.NamespacePrefix = "RedisClientSortedSetTests"; - storeMembers = new List { "one", "two", "three", "four" }; - - stringDoubleMap = new Dictionary { - {"one",1}, {"two",2}, {"three",3}, {"four",4} - }; - } - - [Test] - public void Can_AddItemToSortedSet_and_GetAllFromSet() - { - var i = 0; - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, i++)); - - var members = Redis.GetAllItemsFromSortedSet(SetId); - Assert.That(members.EquivalentTo(storeMembers), Is.True); - } - - [Test] - public void Can_AddRangeToSortedSet_and_GetAllFromSet() - { - var success = Redis.AddRangeToSortedSet(SetId, storeMembers, 1); - Assert.That(success, Is.True); - - var members = Redis.GetAllItemsFromSortedSet(SetId); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void AddToSet_without_score_adds_an_implicit_lexical_order_score() - { - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); - - var members = Redis.GetAllItemsFromSortedSet(SetId); - - storeMembers.Sort((x, y) => x.CompareTo(y)); - Assert.That(members.EquivalentTo(storeMembers), Is.True); - } - - [Test] - public void AddToSet_with_same_score_is_still_returned_in_lexical_order_score() - { - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, 1)); - - var members = Redis.GetAllItemsFromSortedSet(SetId); - - storeMembers.Sort((x, y) => x.CompareTo(y)); - Assert.That(members.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_RemoveFromSet() - { - const string removeMember = "two"; - - storeMembers.ForEach(x => 
Redis.AddItemToSortedSet(SetId, x)); - - Redis.RemoveItemFromSortedSet(SetId, removeMember); - - storeMembers.Remove(removeMember); - - var members = Redis.GetAllItemsFromSortedSet(SetId); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_PopFromSet() - { - var i = 0; - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, i++)); - - var member = Redis.PopItemWithHighestScoreFromSortedSet(SetId); - - Assert.That(member, Is.EqualTo("four")); - } - - [Test] - public void Can_GetSetCount() - { - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); - - var setCount = Redis.GetSortedSetCount(SetId); - - Assert.That(setCount, Is.EqualTo(storeMembers.Count)); - } - - [Test] - public void Can_GetSetCountByScores() - { - var scores = new List(); - - storeMembers.ForEach(x => - { - Redis.AddItemToSortedSet(SetId, x); - scores.Add(RedisClient.GetLexicalScore(x)); - }); + Dictionary stringDoubleMap; - Assert.That(Redis.GetSortedSetCount(SetId, scores.Min(), scores.Max()), Is.EqualTo(storeMembers.Count())); - Assert.That(Redis.GetSortedSetCount(SetId, scores.Min(), scores.Min()), Is.EqualTo(1)); - } + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + Redis.NamespacePrefix = "RedisClientSortedSetTests"; + storeMembers = new List { "one", "two", "three", "four" }; - [Test] - public void Does_SortedSetContainsValue() - { - const string existingMember = "two"; - const string nonExistingMember = "five"; + stringDoubleMap = new Dictionary { + {"one",1}, {"two",2}, {"three",3}, {"four",4} + }; + } - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); + [Test] + public void Can_AddItemToSortedSet_and_GetAllFromSet() + { + var i = 0; + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, i++)); - Assert.That(Redis.SortedSetContainsItem(SetId, existingMember), Is.True); - Assert.That(Redis.SortedSetContainsItem(SetId, nonExistingMember), Is.False); - } + var members = Redis.GetAllItemsFromSortedSet(SetId); + Assert.That(members.EquivalentTo(storeMembers), Is.True); + } - [Test] - public void Can_GetItemIndexInSortedSet_in_Asc_and_Desc() - { - var i = 10; - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, i++)); + [Test] + public void Can_AddRangeToSortedSet_and_GetAllFromSet() + { + var success = Redis.AddRangeToSortedSet(SetId, storeMembers, 1); + Assert.That(success, Is.True); - Assert.That(Redis.GetItemIndexInSortedSet(SetId, "one"), Is.EqualTo(0)); - Assert.That(Redis.GetItemIndexInSortedSet(SetId, "two"), Is.EqualTo(1)); - Assert.That(Redis.GetItemIndexInSortedSet(SetId, "three"), Is.EqualTo(2)); - Assert.That(Redis.GetItemIndexInSortedSet(SetId, "four"), Is.EqualTo(3)); + var members = Redis.GetAllItemsFromSortedSet(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void AddToSet_without_score_adds_an_implicit_lexical_order_score() + { + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); + + var members = Redis.GetAllItemsFromSortedSet(SetId); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + Assert.That(members.EquivalentTo(storeMembers), Is.True); + } + + [Test] + public void AddToSet_with_same_score_is_still_returned_in_lexical_order_score() + { + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, 1)); - Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "one"), Is.EqualTo(3)); - Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "two"), Is.EqualTo(2)); - Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "three"), Is.EqualTo(1)); - 
Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "four"), Is.EqualTo(0)); - } + var members = Redis.GetAllItemsFromSortedSet(SetId); - [Test] - public void Can_Store_IntersectBetweenSets() - { - string set1Name = PrefixedKey("testintersectset1"); - string set2Name = PrefixedKey("testintersectset2"); - string storeSetName = PrefixedKey("testintersectsetstore"); - var set1Members = new List { "one", "two", "three", "four", "five" }; - var set2Members = new List { "four", "five", "six", "seven" }; + storeMembers.Sort((x, y) => x.CompareTo(y)); + Assert.That(members.EquivalentTo(storeMembers)); + } - set1Members.ForEach(x => Redis.AddItemToSortedSet(set1Name, x)); - set2Members.ForEach(x => Redis.AddItemToSortedSet(set2Name, x)); + [Test] + public void Can_RemoveFromSet() + { + const string removeMember = "two"; - Redis.StoreIntersectFromSortedSets(storeSetName, set1Name, set2Name); + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); - var intersectingMembers = Redis.GetAllItemsFromSortedSet(storeSetName); + Redis.RemoveItemFromSortedSet(SetId, removeMember); - Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); - } + storeMembers.Remove(removeMember); - [Test] - public void Can_Store_UnionBetweenSets() - { - string set1Name = PrefixedKey("testunionset1"); - string set2Name = PrefixedKey("testunionset2"); - string storeSetName = PrefixedKey("testunionsetstore"); - var set1Members = new List { "one", "two", "three", "four", "five" }; - var set2Members = new List { "four", "five", "six", "seven" }; + var members = Redis.GetAllItemsFromSortedSet(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } - set1Members.ForEach(x => Redis.AddItemToSortedSet(set1Name, x)); - set2Members.ForEach(x => Redis.AddItemToSortedSet(set2Name, x)); + [Test] + public void Can_RemoveItemsFromSortedSet() + { + var removeMembers = new[] { "two" , "four", "six" }; - Redis.StoreUnionFromSortedSets(storeSetName, set1Name, set2Name); + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); - var unionMembers = Redis.GetAllItemsFromSortedSet(storeSetName); + var removeCount = Redis.RemoveItemsFromSortedSet(SetId, removeMembers.ToList()); + Assert.That(removeCount, Is.EqualTo(2)); - Assert.That(unionMembers, Is.EquivalentTo( - new List { "one", "two", "three", "four", "five", "six", "seven" })); - } + removeMembers.Each(x => storeMembers.Remove(x)); - [Test] - public void Can_pop_items_with_lowest_and_highest_scores_from_sorted_set() - { - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); + var members = Redis.GetAllItemsFromSortedSet(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } - storeMembers.Sort((x, y) => x.CompareTo(y)); + [Test] + public void Can_PopFromSet() + { + var i = 0; + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, i++)); - var lowestScore = Redis.PopItemWithLowestScoreFromSortedSet(SetId); - Assert.That(lowestScore, Is.EqualTo(storeMembers.First())); + var member = Redis.PopItemWithHighestScoreFromSortedSet(SetId); - var highestScore = Redis.PopItemWithHighestScoreFromSortedSet(SetId); - Assert.That(highestScore, Is.EqualTo(storeMembers[storeMembers.Count - 1])); - } + Assert.That(member, Is.EqualTo("four")); + } - [Test] - public void Can_GetRangeFromSortedSetByLowestScore_from_sorted_set() - { - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); - - storeMembers.Sort((x, y) => x.CompareTo(y)); - var memberRage = storeMembers.Where(x => - x.CompareTo("four") >= 0 && 
x.CompareTo("three") <= 0).ToList(); - - var range = Redis.GetRangeFromSortedSetByLowestScore(SetId, "four", "three"); - Assert.That(range.EquivalentTo(memberRage)); - } + [Test] + public void Can_GetSetCount() + { + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); - [Test] - public void Can_IncrementItemInSortedSet() - { - stringDoubleMap.ForEach(x => Redis.AddItemToSortedSet(SetId, x.Key, x.Value)); + var setCount = Redis.GetSortedSetCount(SetId); - var currentScore = Redis.IncrementItemInSortedSet(SetId, "one", 2); - stringDoubleMap["one"] = stringDoubleMap["one"] + 2; - Assert.That(currentScore, Is.EqualTo(stringDoubleMap["one"])); + Assert.That(setCount, Is.EqualTo(storeMembers.Count)); + } - currentScore = Redis.IncrementItemInSortedSet(SetId, "four", -2); - stringDoubleMap["four"] = stringDoubleMap["four"] - 2; - Assert.That(currentScore, Is.EqualTo(stringDoubleMap["four"])); + [Test] + public void Can_GetSetCountByScores() + { + var scores = new List(); - var map = Redis.GetAllWithScoresFromSortedSet(SetId); + storeMembers.ForEach(x => + { + Redis.AddItemToSortedSet(SetId, x); + scores.Add(RedisClient.GetLexicalScore(x)); + }); - Assert.That(stringDoubleMap.EquivalentTo(map)); - Debug.WriteLine(map.Dump()); - } + Assert.That(Redis.GetSortedSetCount(SetId, scores.Min(), scores.Max()), Is.EqualTo(storeMembers.Count())); + Assert.That(Redis.GetSortedSetCount(SetId, scores.Min(), scores.Min()), Is.EqualTo(1)); + } + + [Test] + public void Does_SortedSetContainsValue() + { + const string existingMember = "two"; + const string nonExistingMember = "five"; + + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); + + Assert.That(Redis.SortedSetContainsItem(SetId, existingMember), Is.True); + Assert.That(Redis.SortedSetContainsItem(SetId, nonExistingMember), Is.False); + } + + [Test] + public void Can_GetItemIndexInSortedSet_in_Asc_and_Desc() + { + var i = 10; + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, i++)); + + Assert.That(Redis.GetItemIndexInSortedSet(SetId, "one"), Is.EqualTo(0)); + Assert.That(Redis.GetItemIndexInSortedSet(SetId, "two"), Is.EqualTo(1)); + Assert.That(Redis.GetItemIndexInSortedSet(SetId, "three"), Is.EqualTo(2)); + Assert.That(Redis.GetItemIndexInSortedSet(SetId, "four"), Is.EqualTo(3)); + + Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "one"), Is.EqualTo(3)); + Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "two"), Is.EqualTo(2)); + Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "three"), Is.EqualTo(1)); + Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "four"), Is.EqualTo(0)); + } + + [Test] + public void Can_Store_IntersectBetweenSets() + { + string set1Name = PrefixedKey("testintersectset1"); + string set2Name = PrefixedKey("testintersectset2"); + string storeSetName = PrefixedKey("testintersectsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + set1Members.ForEach(x => Redis.AddItemToSortedSet(set1Name, x)); + set2Members.ForEach(x => Redis.AddItemToSortedSet(set2Name, x)); + + Redis.StoreIntersectFromSortedSets(storeSetName, set1Name, set2Name); + + var intersectingMembers = Redis.GetAllItemsFromSortedSet(storeSetName); + + Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); + } + + [Test] + public void Can_Store_UnionBetweenSets() + { + string set1Name = PrefixedKey("testunionset1"); + string set2Name = PrefixedKey("testunionset2"); + string storeSetName = 
PrefixedKey("testunionsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + set1Members.ForEach(x => Redis.AddItemToSortedSet(set1Name, x)); + set2Members.ForEach(x => Redis.AddItemToSortedSet(set2Name, x)); + + Redis.StoreUnionFromSortedSets(storeSetName, set1Name, set2Name); + + var unionMembers = Redis.GetAllItemsFromSortedSet(storeSetName); + + Assert.That(unionMembers, Is.EquivalentTo( + new List { "one", "two", "three", "four", "five", "six", "seven" })); + } + + [Test] + public void Can_pop_items_with_lowest_and_highest_scores_from_sorted_set() + { + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + + var lowestScore = Redis.PopItemWithLowestScoreFromSortedSet(SetId); + Assert.That(lowestScore, Is.EqualTo(storeMembers.First())); + + var highestScore = Redis.PopItemWithHighestScoreFromSortedSet(SetId); + Assert.That(highestScore, Is.EqualTo(storeMembers[storeMembers.Count - 1])); + } + + [Test, Ignore("seems unstable?")] + public void Can_GetRangeFromSortedSetByLowestScore_from_sorted_set() + { + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + var memberRage = storeMembers.Where(x => + x.CompareTo("four") >= 0 && x.CompareTo("three") <= 0).ToList(); + + var range = Redis.GetRangeFromSortedSetByLowestScore(SetId, "four", "three"); + Assert.That(range.EquivalentTo(memberRage)); + } + + [Test] + public void Can_IncrementItemInSortedSet() + { + stringDoubleMap.Each(x => Redis.AddItemToSortedSet(SetId, x.Key, x.Value)); + + var currentScore = Redis.IncrementItemInSortedSet(SetId, "one", 3); + stringDoubleMap["one"] = stringDoubleMap["one"] + 3; + Assert.That(currentScore, Is.EqualTo(stringDoubleMap["one"])); + + currentScore = Redis.IncrementItemInSortedSet(SetId, "four", -3); + stringDoubleMap["four"] = stringDoubleMap["four"] - 3; + Assert.That(currentScore, Is.EqualTo(stringDoubleMap["four"])); + + var map = Redis.GetAllWithScoresFromSortedSet(SetId); + + Assert.That(stringDoubleMap.UnorderedEquivalentTo(map)); + } [Test] public void Can_WorkInSortedSetUnderDifferentCulture() { +#if NETCORE + var prevCulture = CultureInfo.CurrentCulture; + CultureInfo.CurrentCulture = new CultureInfo("ru-RU"); +#else + var prevCulture = Thread.CurrentThread.CurrentCulture; Thread.CurrentThread.CurrentCulture = CultureInfo.CreateSpecificCulture("ru-RU"); +#endif Redis.AddItemToSortedSet(SetId, "key", 123.22); var map = Redis.GetAllWithScoresFromSortedSet(SetId); Assert.AreEqual(123.22, map["key"]); + +#if NETCORE + CultureInfo.CurrentCulture = prevCulture; +#else + Thread.CurrentThread.CurrentCulture = prevCulture; +#endif + } + + + [Ignore("Not implemented yet")] + [Test] + public void Can_GetRangeFromSortedSetByHighestScore_from_sorted_set() + { + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); + + storeMembers.Sort((x, y) => y.CompareTo(x)); + var memberRage = storeMembers.Where(x => + x.CompareTo("four") >= 0 && x.CompareTo("three") <= 0).ToList(); + + var range = Redis.GetRangeFromSortedSetByHighestScore(SetId, "four", "three"); + Assert.That(range.EquivalentTo(memberRage)); + } + + [Test] + public void Can_get_index_and_score_from_SortedSet() + { + storeMembers = new List { "a", "b", "c", "d" }; + const double initialScore = 10d; + var i = initialScore; + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, i++)); + + 
Assert.That(Redis.GetItemIndexInSortedSet(SetId, "a"), Is.EqualTo(0)); + Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "a"), Is.EqualTo(storeMembers.Count - 1)); + + Assert.That(Redis.GetItemScoreInSortedSet(SetId, "a"), Is.EqualTo(initialScore)); + Assert.That(Redis.GetItemScoreInSortedSet(SetId, "d"), Is.EqualTo(initialScore + storeMembers.Count - 1)); + } + + [Test] + public void Can_enumerate_small_ICollection_Set() + { + storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); + + var members = new List(); + foreach (var item in Redis.SortedSets[SetId]) + { + members.Add(item); + } + members.Sort(); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_enumerate_large_ICollection_Set() + { + if (TestConfig.IgnoreLongTests) return; + + const int setSize = 2500; + + storeMembers = new List(); + setSize.Times(x => + { + Redis.AddItemToSortedSet(SetId, x.ToString()); + storeMembers.Add(x.ToString()); + }); + + var members = new List(); + foreach (var item in Redis.SortedSets[SetId]) + { + members.Add(item); + } + members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_Add_to_ICollection_Set() + { + var list = Redis.SortedSets[SetId]; + storeMembers.ForEach(list.Add); + + var members = list.ToList(); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Can_Clear_ICollection_Set() + { + var list = Redis.SortedSets[SetId]; + storeMembers.ForEach(list.Add); + + Assert.That(list.Count, Is.EqualTo(storeMembers.Count)); + + list.Clear(); + + Assert.That(list.Count, Is.EqualTo(0)); } + [Test] + public void Can_Test_Contains_in_ICollection_Set() + { + var list = Redis.SortedSets[SetId]; + storeMembers.ForEach(list.Add); + + Assert.That(list.Contains("two"), Is.True); + Assert.That(list.Contains("five"), Is.False); + } - [Ignore("Not implemented yet")] - [Test] - public void Can_GetRangeFromSortedSetByHighestScore_from_sorted_set() - { - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); - - storeMembers.Sort((x, y) => y.CompareTo(x)); - var memberRage = storeMembers.Where(x => - x.CompareTo("four") >= 0 && x.CompareTo("three") <= 0).ToList(); - - var range = Redis.GetRangeFromSortedSetByHighestScore(SetId, "four", "three"); - Assert.That(range.EquivalentTo(memberRage)); - } - - [Test] - public void Can_get_index_and_score_from_SortedSet() - { - storeMembers = new List { "a", "b", "c", "d" }; - const double initialScore = 10d; - var i = initialScore; - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x, i++)); - - Assert.That(Redis.GetItemIndexInSortedSet(SetId, "a"), Is.EqualTo(0)); - Assert.That(Redis.GetItemIndexInSortedSetDesc(SetId, "a"), Is.EqualTo(storeMembers.Count - 1)); - - Assert.That(Redis.GetItemScoreInSortedSet(SetId, "a"), Is.EqualTo(initialScore)); - Assert.That(Redis.GetItemScoreInSortedSet(SetId, "d"), Is.EqualTo(initialScore + storeMembers.Count - 1)); - } - - [Test] - public void Can_enumerate_small_ICollection_Set() - { - storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); - - var members = new List(); - foreach (var item in Redis.SortedSets[SetId]) - { - members.Add(item); - } - members.Sort(); - Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void 
Can_enumerate_large_ICollection_Set() - { - if (TestConfig.IgnoreLongTests) return; - - const int setSize = 2500; - - storeMembers = new List(); - setSize.Times(x => - { - Redis.AddItemToSortedSet(SetId, x.ToString()); - storeMembers.Add(x.ToString()); - }); - - var members = new List(); - foreach (var item in Redis.SortedSets[SetId]) - { - members.Add(item); - } - members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); - Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_Add_to_ICollection_Set() - { - var list = Redis.SortedSets[SetId]; - storeMembers.ForEach(list.Add); - - var members = list.ToList(); - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Can_Clear_ICollection_Set() - { - var list = Redis.SortedSets[SetId]; - storeMembers.ForEach(list.Add); - - Assert.That(list.Count, Is.EqualTo(storeMembers.Count)); - - list.Clear(); - - Assert.That(list.Count, Is.EqualTo(0)); - } - - [Test] - public void Can_Test_Contains_in_ICollection_Set() - { - var list = Redis.SortedSets[SetId]; - storeMembers.ForEach(list.Add); - - Assert.That(list.Contains("two"), Is.True); - Assert.That(list.Contains("five"), Is.False); - } - - [Test] - public void Can_Remove_value_from_ICollection_Set() - { - var list = Redis.SortedSets[SetId]; - storeMembers.ForEach(list.Add); - - storeMembers.Remove("two"); - list.Remove("two"); - - var members = list.ToList(); - - Assert.That(members, Is.EquivalentTo(storeMembers)); - } - - [Test] - public void Score_from_non_existent_item_returns_NaN() - { - var score = Redis.GetItemScoreInSortedSet("nonexistentset", "value"); - - Assert.That(score, Is.EqualTo(Double.NaN)); - } - - [Test] - public void Can_add_large_score_to_sortedset() - { - Redis.AddItemToSortedSet(SetId, "value", 12345678901234567890d); - var score = Redis.GetItemScoreInSortedSet(SetId, "value"); - - Assert.That(score, Is.EqualTo(12345678901234567890d)); - } - - } + [Test] + public void Can_Remove_value_from_ICollection_Set() + { + var list = Redis.SortedSets[SetId]; + storeMembers.ForEach(list.Add); + + storeMembers.Remove("two"); + list.Remove("two"); + + var members = list.ToList(); + + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public void Score_from_non_existent_item_returns_NaN() + { + var score = Redis.GetItemScoreInSortedSet("nonexistentset", "value"); + + Assert.That(score, Is.EqualTo(Double.NaN)); + } + + [Test] + public void Can_add_large_score_to_sortedset() + { + Redis.AddItemToSortedSet(SetId, "value", 12345678901234567890d); + var score = Redis.GetItemScoreInSortedSet(SetId, "value"); + + Assert.That(score, Is.EqualTo(12345678901234567890d)); + } + + public class Article + { + public int Id { get; set; } + public string Title { get; set; } + public DateTime ModifiedDate { get; set; } + } + + [Test] + public void Can_use_SortedIndex_to_store_articles_by_Date() + { + var redisArticles = Redis.As
(); + + var articles = new[] + { + new Article { Id = 1, Title = "Article 1", ModifiedDate = new DateTime(2015, 01, 02) }, + new Article { Id = 2, Title = "Article 2", ModifiedDate = new DateTime(2015, 01, 01) }, + new Article { Id = 3, Title = "Article 3", ModifiedDate = new DateTime(2015, 01, 03) }, + }; + + redisArticles.StoreAll(articles); + + const string LatestArticlesSet = "urn:Article:modified"; + + foreach (var article in articles) + { + Redis.AddItemToSortedSet(LatestArticlesSet, article.Id.ToString(), article.ModifiedDate.Ticks); + } + + var articleIds = Redis.GetAllItemsFromSortedSetDesc(LatestArticlesSet); + articleIds.PrintDump(); + + var latestArticles = redisArticles.GetByIds(articleIds); + latestArticles.PrintDump(); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientTests.Async.cs new file mode 100644 index 00000000..b5b7aad3 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientTests.Async.cs @@ -0,0 +1,673 @@ +using NUnit.Framework; +using ServiceStack.Redis.Support.Locking; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisClientTestsAsync + : RedisClientTestsBaseAsync + { + const string Value = "Value"; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = nameof(RedisClientTestsAsync); + } + + [Test] + public async Task Can_Set_and_Get_string() + { + await RedisAsync.SetValueAsync("key", Value); + var valueBytes = await NativeAsync.GetAsync("key"); + var valueString = GetString(valueBytes); + await RedisAsync.RemoveAsync("key"); + + Assert.That(valueString, Is.EqualTo(Value)); + } + + [Test] + public async Task Can_Set_and_Get_key_with_space() + { + await RedisAsync.SetValueAsync("key with space", Value); + var valueBytes = await NativeAsync.GetAsync("key with space"); + var valueString = GetString(valueBytes); + await RedisAsync.RemoveAsync("key with space"); + + Assert.That(valueString, Is.EqualTo(Value)); + } + + [Test] + public async Task Can_Set_and_Get_key_with_spaces() + { + const string key = "key with spaces"; + + await RedisAsync.SetValueAsync(key, Value); + var valueBytes = await NativeAsync.GetAsync(key); + var valueString = GetString(valueBytes); + + Assert.That(valueString, Is.EqualTo(Value)); + } + + [Test] + public async Task Can_Set_and_Get_key_with_all_byte_values() + { + const string key = "bytesKey"; + + var value = new byte[256]; + for (var i = 0; i < value.Length; i++) + { + value[i] = (byte)i; + } + + await RedisAsync.SetAsync(key, value); + var resultValue = await NativeAsync.GetAsync(key); + + Assert.That(resultValue, Is.EquivalentTo(value)); + } + + [Test] + public async Task GetKeys_returns_matching_collection() + { + await RedisAsync.SetAsync("ss-tests:a1", "One"); + await RedisAsync.SetAsync("ss-tests:a2", "One"); + await RedisAsync.SetAsync("ss-tests:b3", "One"); + + var matchingKeys = await RedisAsync.SearchKeysAsync("ss-tests:a*"); + + Assert.That(matchingKeys.Count, Is.EqualTo(2)); + } + + [Test] + public async Task GetKeys_on_non_existent_keys_returns_empty_collection() + { + var matchingKeys = await RedisAsync.SearchKeysAsync("ss-tests:NOTEXISTS"); + + Assert.That(matchingKeys.Count, Is.EqualTo(0)); + } + + [Test] + public async Task Can_get_Types() + { + await 
RedisAsync.SetValueAsync("string", "string"); + await RedisAsync.AddItemToListAsync("list", "list"); + await RedisAsync.AddItemToSetAsync("set", "set"); + await RedisAsync.AddItemToSortedSetAsync("sortedset", "sortedset"); + await RedisAsync.SetEntryInHashAsync("hash", "key", "val"); + + Assert.That(await RedisAsync.GetEntryTypeAsync("nokey"), Is.EqualTo(RedisKeyType.None)); + Assert.That(await RedisAsync.GetEntryTypeAsync("string"), Is.EqualTo(RedisKeyType.String)); + Assert.That(await RedisAsync.GetEntryTypeAsync("list"), Is.EqualTo(RedisKeyType.List)); + Assert.That(await RedisAsync.GetEntryTypeAsync("set"), Is.EqualTo(RedisKeyType.Set)); + Assert.That(await RedisAsync.GetEntryTypeAsync("sortedset"), Is.EqualTo(RedisKeyType.SortedSet)); + Assert.That(await RedisAsync.GetEntryTypeAsync("hash"), Is.EqualTo(RedisKeyType.Hash)); + } + + [Test] + public async Task Can_delete_keys() + { + await RedisAsync.SetValueAsync("key", "val"); + + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + + await RedisAsync.RemoveAsync("key"); + + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + + var keysMap = new Dictionary(); + + 10.Times(i => keysMap.Add("key" + i, "val" + i)); + + await RedisAsync.SetAllAsync(keysMap); + + for (int i = 0; i < 10; i++) + Assert.That(await RedisAsync.ContainsKeyAsync("key" + i), Is.True); + + await RedisAsync.RemoveEntryAsync(keysMap.Keys.ToArray()); + + for (int i = 0; i < 10; i++) + Assert.That(await RedisAsync.ContainsKeyAsync("key" + i), Is.False); + } + + [Test] + public async Task Can_get_RandomKey() + { + await RedisAsync.SelectAsync(15); + var keysMap = new Dictionary(); + + 10.Times(i => keysMap.Add(RedisRaw.NamespacePrefix + "key" + i, "val" + i)); + + await RedisAsync.SetAllAsync(keysMap); + + var randKey = await RedisAsync.GetRandomKeyAsync(); + + Assert.That(keysMap.ContainsKey(randKey), Is.True); + } + + [Test] + public async Task Can_RenameKey() + { + await RedisAsync.SetValueAsync("oldkey", "val"); + await NativeAsync.RenameAsync("oldkey", "newkey"); + + Assert.That(await RedisAsync.ContainsKeyAsync("oldkey"), Is.False); + Assert.That(await RedisAsync.ContainsKeyAsync("newkey"), Is.True); + } + + [Test] + public async Task Can_Expire() + { + await RedisAsync.SetValueAsync("key", "val"); + await RedisAsync.ExpireEntryInAsync("key", TimeSpan.FromSeconds(1)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_Expire_Ms() + { + await RedisAsync.SetValueAsync("key", "val"); + await RedisAsync.ExpireEntryInAsync("key", TimeSpan.FromMilliseconds(100)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(500); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Ignore("Changes in system clock can break test")] + [Test] + public async Task Can_ExpireAt() + { + await RedisAsync.SetValueAsync("key", "val"); + + var unixNow = DateTime.Now.ToUnixTime(); + var in2Secs = unixNow + 2; + + await NativeAsync.ExpireAtAsync("key", in2Secs); + + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(3000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_GetTimeToLive() + { + await RedisAsync.SetValueAsync("key", "val"); + await RedisAsync.ExpireEntryInAsync("key", TimeSpan.FromSeconds(10)); + + var ttl = await RedisAsync.GetTimeToLiveAsync("key"); + 
Assert.That(ttl.Value.TotalSeconds, Is.GreaterThanOrEqualTo(9)); + await Task.Delay(1700); + + ttl = await RedisAsync.GetTimeToLiveAsync("key"); + Assert.That(ttl.Value.TotalSeconds, Is.LessThanOrEqualTo(9)); + } + + [Test] + public async Task Can_GetServerTime() + { + var now = await RedisAsync.GetServerTimeAsync(); + + now.Kind.PrintDump(); + now.ToString("D").Print(); + now.ToString("T").Print(); + + "UtcNow".Print(); + DateTime.UtcNow.ToString("D").Print(); + DateTime.UtcNow.ToString("T").Print(); + + Assert.That(now.Date, Is.EqualTo(DateTime.UtcNow.Date)); + } + + [Test] + public async Task Can_Ping() + { + Assert.That(await RedisAsync.PingAsync(), Is.True); + } + + [Test] + public async Task Can_Echo() + { + Assert.That(await RedisAsync.EchoAsync("Hello"), Is.EqualTo("Hello")); + } + + [Test] + public async Task Can_SlaveOfNoOne() + { + await NativeAsync.SlaveOfNoOneAsync(); + } + + [Test] + public async Task Can_Save() + { + try + { + await NativeAsync.SaveAsync(); + } + catch (RedisResponseException e) + { + // if exception has that message then it still proves that BgSave works as expected. + if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress") + || e.Message.StartsWith("An AOF log rewriting in progress: can't BGSAVE right now") + || e.Message.StartsWith("Background save already in progress") + || e.Message.StartsWith("Another child process is active (AOF?): can't BGSAVE right now")) + return; + + throw; + } + } + + [Test] + public async Task Can_BgSave() + { + try + { + await NativeAsync.BgSaveAsync(); + } + catch (RedisResponseException e) + { + // if exception has that message then it still proves that BgSave works as expected. + if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress") + || e.Message.StartsWith("An AOF log rewriting in progress: can't BGSAVE right now") + || e.Message.StartsWith("Background save already in progress") + || e.Message.StartsWith("Another child process is active (AOF?): can't BGSAVE right now")) + return; + + throw; + } + } + + [Test] + public async Task Can_Quit() + { + await NativeAsync.QuitAsync(); + RedisRaw.NamespacePrefix = null; + CleanMask = null; + } + + [Test] + public async Task Can_BgRewriteAof() + { + await NativeAsync.BgRewriteAofAsync(); + } + + [Test] + [Ignore("Works too well and shutdown the server")] + public async Task Can_Shutdown() + { + await RedisAsync.ShutdownAsync(); + } + + [Test] + public async Task Can_get_Keys_with_pattern() + { + for (int i = 0; i < 5; i++) + await RedisAsync.SetValueAsync("k1:" + i, "val"); + for (int i = 0; i < 5; i++) + await RedisAsync.SetValueAsync("k2:" + i, "val"); + + var keys = await NativeAsync.KeysAsync("k1:*"); + Assert.That(keys.Length, Is.EqualTo(5)); + + var scanKeys = await RedisAsync.SearchKeysAsync("k1:*"); + Assert.That(scanKeys.Count, Is.EqualTo(5)); + } + + [Test] + public async Task Can_GetAll() + { + var keysMap = new Dictionary(); + + 10.Times(i => keysMap.Add("key" + i, "val" + i)); + + await RedisAsync.SetAllAsync(keysMap); + + var map = await RedisAsync.GetAllAsync(keysMap.Keys); + var mapKeys = await RedisAsync.GetValuesAsync(keysMap.Keys.ToList()); + + foreach (var entry in keysMap) + { + Assert.That(map.ContainsKey(entry.Key), Is.True); + Assert.That(mapKeys.Contains(entry.Value), Is.True); + } + } + + [Test] + public async Task Can_GetValues_JSON_strings() + { + var val = 
"{\"AuthorId\":0,\"Created\":\"\\/Date(1345961754013)\\/\",\"Name\":\"test\",\"Base64\":\"BQELAAEBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP8BWAFYAViA/wFYAVgBWID/AVgBWAFYgP8BWAFYAViA/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWID/gP+A/wFYgP+A/4D/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/AViA/4D/AVgBWAFYgP8BWAFYAViA/4D/AViA/4D/gP+A/4D/gP8BWAFYgP+A/wFYgP+A/wFYgP+A/4D/gP+A/wFYgP+A/wFYgP+A/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWAFYAViA/wFYAVgBWID/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"}"; + + await RedisAsync.SetValueAsync("UserLevel/1", val); + + var vals = await RedisAsync.GetValuesAsync(new List(new[] { "UserLevel/1" })); + + Assert.That(vals.Count, Is.EqualTo(1)); + Assert.That(vals[0], Is.EqualTo(val)); + } + + [Test] + public async Task Can_AcquireLock() + { + // guid here is to prevent competition between concurrent runtime tests + var key = PrefixedKey("AcquireLockKeyTimeOut:" + Guid.NewGuid()); + var lockKey = PrefixedKey("Can_AcquireLock_TimeOut:" + Guid.NewGuid()); + await RedisAsync.IncrementValueAsync(key); //1 + + Task[] tasks = new Task[5]; + for (int i = 0; i < tasks.Length; i++) + { + var snapsot = i; + tasks[snapsot] = Task.Run( + () => IncrementKeyInsideLock(key, lockKey, snapsot, new RedisClient(TestConfig.SingleHost) { NamespacePrefix = RedisRaw.NamespacePrefix }.ForAsyncOnly()) + ); + } + await Task.WhenAll(tasks); + + var val = await RedisAsync.GetAsync(key); + + Assert.That(val, Is.EqualTo(1 + 5)); + } + + private async Task IncrementKeyInsideLock(String key, String lockKey, int clientNo, IRedisClientAsync client) + { + await using (await client.AcquireLockAsync(lockKey)) + { + Debug.WriteLine(String.Format("client {0} acquired lock", clientNo)); + var val = await client.GetAsync(key); + + await Task.Delay(200); + + await client.SetAsync(key, val + 1); + Debug.WriteLine(String.Format("client {0} released lock", clientNo)); + } + } + + [Test] + public async Task Can_AcquireLock_TimeOut() + { + // guid here is to prevent competition between concurrent runtime tests + var key = PrefixedKey("AcquireLockKeyTimeOut:" + Guid.NewGuid()); + var lockKey = PrefixedKey("Can_AcquireLock_TimeOut:" + Guid.NewGuid()); + await RedisAsync.IncrementValueAsync(key); //1 + _ = await RedisAsync.AcquireLockAsync(lockKey); + var waitFor = TimeSpan.FromMilliseconds(1000); + var now = DateTime.Now; + + try + { + await using var client = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + await using (await client.AcquireLockAsync(lockKey, waitFor)) + { + await client.IncrementValueAsync(key); //2 + } + } + catch (TimeoutException) + { + var val = await RedisAsync.GetAsync(key); + Assert.That(val, Is.EqualTo(1)); + + var timeTaken = DateTime.Now - now; + Assert.That(timeTaken.TotalMilliseconds > waitFor.TotalMilliseconds, Is.True); + Assert.That(timeTaken.TotalMilliseconds < waitFor.TotalMilliseconds + 1000, Is.True); 
+ return; + } + finally + { + await RedisAsync.RemoveAsync(key); + await RedisAsync.RemoveAsync(lockKey); + } + Assert.Fail("should have Timed out"); + } + + [Test] + public async Task Can_Append() + { + const string expectedString = "Hello, " + "World!"; + await RedisAsync.SetValueAsync("key", "Hello, "); + var currentLength = await RedisAsync.AppendToValueAsync("key", "World!"); + + Assert.That(currentLength, Is.EqualTo(expectedString.Length)); + + var val = await RedisAsync.GetValueAsync("key"); + Assert.That(val, Is.EqualTo(expectedString)); + } + + [Test] + public async Task Can_GetRange() + { + const string helloWorld = "Hello, World!"; + await RedisAsync.SetValueAsync("key", helloWorld); + + var fromIndex = "Hello, ".Length; + var toIndex = "Hello, World".Length - 1; + + var expectedString = helloWorld.Substring(fromIndex, toIndex - fromIndex + 1); + var world = await NativeAsync.GetRangeAsync("key", fromIndex, toIndex); + + Assert.That(world.Length, Is.EqualTo(expectedString.Length)); + } + + [Test] + public async Task Can_create_distributed_lock() + { + var key = "lockkey"; + int lockTimeout = 2; + + var distributedLock = new DistributedLock().AsAsync(); + + var state = await distributedLock.LockAsync(key, lockTimeout, lockTimeout, RedisAsync); + Assert.AreEqual(state.Result, DistributedLock.LOCK_ACQUIRED); + + //can't re-lock + distributedLock = new DistributedLock(); + state = await distributedLock.LockAsync(key, lockTimeout, lockTimeout, RedisAsync); + Assert.AreEqual(state.Result, DistributedLock.LOCK_NOT_ACQUIRED); + + // re-acquire lock after timeout + await Task.Delay(lockTimeout * 1000 + 1000); + distributedLock = new DistributedLock(); + state = await distributedLock.LockAsync(key, lockTimeout, lockTimeout, RedisAsync); + + (var result, var expire) = state; // test decomposition since we are here + Assert.AreEqual(result, DistributedLock.LOCK_RECOVERED); + + Assert.IsTrue(await distributedLock.UnlockAsync(key, expire, RedisAsync)); + + //can now lock + distributedLock = new DistributedLock(); + state = await distributedLock.LockAsync(key, lockTimeout, lockTimeout, RedisAsync); + Assert.AreEqual(state.Result, DistributedLock.LOCK_ACQUIRED); + + //cleanup + Assert.IsTrue(await distributedLock.UnlockAsync(key, state.Expiration, RedisAsync)); + } + + public class MyPoco + { + public int Id { get; set; } + public string Name { get; set; } + } + + [Test] + public async Task Can_StoreObject() + { + object poco = new MyPoco { Id = 1, Name = "Test" }; + + await RedisAsync.StoreObjectAsync(poco); + + Assert.That(await RedisAsync.GetValueAsync(RedisRaw.NamespacePrefix + "urn:mypoco:1"), Is.EqualTo("{\"Id\":1,\"Name\":\"Test\"}")); + + Assert.That(await RedisAsync.PopItemFromSetAsync(RedisRaw.NamespacePrefix + "ids:MyPoco"), Is.EqualTo("1")); + } + + [Test] + public async Task Can_store_multiple_keys() + { + var keys = 5.Times(x => "key" + x); + var vals = 5.Times(x => "val" + x); + + using var redis = RedisClient.New(); + await RedisAsync.SetAllAsync(keys, vals); + + var all = await RedisAsync.GetValuesAsync(keys); + Assert.AreEqual(vals, all); + } + + [Test] + public async Task Can_store_Dictionary() + { + var keys = 5.Times(x => "key" + x); + var vals = 5.Times(x => "val" + x); + var map = new Dictionary(); + keys.ForEach(x => map[x] = "val" + x); + + await using var client = RedisClient.New().ForAsyncOnly(); + await client.SetAllAsync(map); + + var all = await client.GetValuesMapAsync(keys); + Assert.AreEqual(map, all); + } + + [Test] + public async Task 
Can_store_Dictionary_as_objects() + { + var map = new Dictionary + { + ["key_a"] = "123", + ["key_b"] = null + }; + + await using var client = RedisClient.New().ForAsyncOnly(); + + await client.SetAllAsync(map); + + Assert.That(await client.GetAsync("key_a"), Is.EqualTo("123")); + Assert.That(await client.GetValueAsync("key_b"), Is.EqualTo("")); + } + + + [Test] + public async Task Can_store_Dictionary_as_bytes() + { + var map = new Dictionary + { + ["key_a"] = "123".ToUtf8Bytes(), + ["key_b"] = null + }; + + await using var client = RedisClient.New().ForAsyncOnly(); + + await client.SetAllAsync(map); + + Assert.That(await client.GetAsync("key_a"), Is.EqualTo("123")); + Assert.That(await client.GetValueAsync("key_b"), Is.EqualTo("")); + } + + [Test] + public async Task Should_reset_slowlog() + { + await using var client = RedisClient.New().ForAsyncOnly(); + await client.SlowlogResetAsync(); + } + + [Test] + public async Task Can_get_slowlog() + { + await using var client = RedisClient.New().ForAsyncOnly(); + + var log = await client.GetSlowlogAsync(10); + + foreach (var t in log) + { + Console.WriteLine(t.Id); + Console.WriteLine(t.Duration); + Console.WriteLine(t.Timestamp); + Console.WriteLine(string.Join(":", t.Arguments)); + } + } + + + [Test] + public async Task Can_change_db_at_runtime() + { + await using var redis = new RedisClient(TestConfig.SingleHost, TestConfig.RedisPort, db: 1).ForAsyncOnly(); + + var val = Environment.TickCount; + var key = "test" + val; + try + { + await redis.SetAsync(key, val); + await redis.SelectAsync(2); + Assert.That(await redis.GetAsync(key), Is.EqualTo(0)); + await redis.SelectAsync(1); + Assert.That(await redis.GetAsync(key), Is.EqualTo(val)); + await redis.DisposeAsync(); + } + finally + { + await redis.SelectAsync(1); + await redis.RemoveAsync(key); + } + } + + [Test] + public async Task Can_Set_Expire_Seconds() + { + await RedisAsync.SetValueAsync("key", "val", expireIn: TimeSpan.FromSeconds(1)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_Set_Expire_MilliSeconds() + { + await RedisAsync.SetValueAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_Set_Expire_Seconds_if_exists() + { + Assert.That(await RedisAsync.SetValueIfExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1500)), + Is.False); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + + await RedisAsync.SetValueAsync("key", "val"); + Assert.That(await RedisAsync.SetValueIfExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Is.True); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_Set_Expire_Seconds_if_not_exists() + { + Assert.That(await RedisAsync.SetValueIfNotExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Is.True); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + + Assert.That(await RedisAsync.SetValueIfNotExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Is.False); + + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + + await 
RedisAsync.RemoveAsync("key"); + await RedisAsync.SetValueIfNotExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + } + } + +} diff --git a/tests/ServiceStack.Redis.Tests/RedisClientTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientTests.cs index 17921c26..19bfbfe5 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientTests.cs @@ -1,20 +1,21 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Linq; using System.Text; using System.Threading; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; using ServiceStack.Redis.Support.Locking; using ServiceStack.Text; namespace ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class RedisClientTests - : RedisClientTestsBase - { - const string Value = "Value"; + [TestFixture, Category("Integration")] + public class RedisClientTests + : RedisClientTestsBase + { + const string Value = "Value"; public override void OnBeforeEachTest() { @@ -22,375 +23,431 @@ public override void OnBeforeEachTest() Redis.NamespacePrefix = "RedisClientTests"; } - [Test] - public void Can_Set_and_Get_string() - { - Redis.SetEntry("key", Value); - var valueBytes = Redis.Get("key"); - var valueString = GetString(valueBytes); - Redis.Remove("key"); + [Test] + public void Can_Set_and_Get_string() + { + Redis.SetValue("key", Value); + var valueBytes = Redis.Get("key"); + var valueString = GetString(valueBytes); + Redis.Remove("key"); - Assert.That(valueString, Is.EqualTo(Value)); - } + Assert.That(valueString, Is.EqualTo(Value)); + } - [Test] - public void Can_Set_and_Get_key_with_space() - { - Redis.SetEntry("key with space", Value); - var valueBytes = Redis.Get("key with space"); - var valueString = GetString(valueBytes); + [Test] + public void Can_Set_and_Get_key_with_space() + { + Redis.SetValue("key with space", Value); + var valueBytes = Redis.Get("key with space"); + var valueString = GetString(valueBytes); Redis.Remove("key with space"); - Assert.That(valueString, Is.EqualTo(Value)); - } + Assert.That(valueString, Is.EqualTo(Value)); + } + + [Test] + public void Can_Set_and_Get_key_with_spaces() + { + const string key = "key with spaces"; + + Redis.SetValue(key, Value); + var valueBytes = Redis.Get(key); + var valueString = GetString(valueBytes); + + Assert.That(valueString, Is.EqualTo(Value)); + } + + [Test] + public void Can_Set_and_Get_key_with_all_byte_values() + { + const string key = "bytesKey"; + + var value = new byte[256]; + for (var i = 0; i < value.Length; i++) + { + value[i] = (byte)i; + } + + Redis.Set(key, value); + var resultValue = Redis.Get(key); + + Assert.That(resultValue, Is.EquivalentTo(value)); + } + + [Test] + public void GetKeys_returns_matching_collection() + { + Redis.Set("ss-tests:a1", "One"); + Redis.Set("ss-tests:a2", "One"); + Redis.Set("ss-tests:b3", "One"); + + var matchingKeys = Redis.SearchKeys("ss-tests:a*"); + + Assert.That(matchingKeys.Count, Is.EqualTo(2)); + } + + [Test] + public void GetKeys_on_non_existent_keys_returns_empty_collection() + { + var matchingKeys = Redis.SearchKeys("ss-tests:NOTEXISTS"); + + Assert.That(matchingKeys.Count, Is.EqualTo(0)); + } + + [Test] + public void Can_get_Types() + { + Redis.SetValue("string", "string"); + Redis.AddItemToList("list", "list"); + Redis.AddItemToSet("set", "set"); + Redis.AddItemToSortedSet("sortedset", "sortedset"); + 
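// Illustrative sketch, not part of this patch: GetEntryType reports how a key is stored
// (the same information as the Redis TYPE command), so calling code can branch on the
// structure before choosing list/set/hash operations, e.g.:
//
//     if (Redis.GetEntryType("sortedset") == RedisKeyType.SortedSet)
//         Debug.WriteLine("stored as a sorted set");
//
// The assertions below cover the members this test relies on: None, String, List, Set,
// SortedSet and Hash.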
Redis.SetEntryInHash("hash", "key", "val"); + + Assert.That(Redis.GetEntryType("nokey"), Is.EqualTo(RedisKeyType.None)); + Assert.That(Redis.GetEntryType("string"), Is.EqualTo(RedisKeyType.String)); + Assert.That(Redis.GetEntryType("list"), Is.EqualTo(RedisKeyType.List)); + Assert.That(Redis.GetEntryType("set"), Is.EqualTo(RedisKeyType.Set)); + Assert.That(Redis.GetEntryType("sortedset"), Is.EqualTo(RedisKeyType.SortedSet)); + Assert.That(Redis.GetEntryType("hash"), Is.EqualTo(RedisKeyType.Hash)); + } + + [Test] + public void Can_delete_keys() + { + Redis.SetValue("key", "val"); + + Assert.That(Redis.ContainsKey("key"), Is.True); + + Redis.Del("key"); + + Assert.That(Redis.ContainsKey("key"), Is.False); + + var keysMap = new Dictionary(); + + 10.Times(i => keysMap.Add("key" + i, "val" + i)); + + Redis.SetAll(keysMap); + + 10.Times(i => Assert.That(Redis.ContainsKey("key" + i), Is.True)); + + Redis.Del(keysMap.Keys.ToArray()); + + 10.Times(i => Assert.That(Redis.ContainsKey("key" + i), Is.False)); + } + + [Test] + public void Can_get_RandomKey() + { + Redis.Db = 15; + var keysMap = new Dictionary(); + + 10.Times(i => keysMap.Add(Redis.NamespacePrefix + "key" + i, "val" + i)); + + Redis.SetAll(keysMap); - [Test] - public void Can_Set_and_Get_key_with_spaces() - { - const string key = "key with spaces"; + var randKey = Redis.RandomKey(); - Redis.SetEntry(key, Value); - var valueBytes = Redis.Get(key); - var valueString = GetString(valueBytes); + Assert.That(keysMap.ContainsKey(randKey), Is.True); + } - Assert.That(valueString, Is.EqualTo(Value)); - } + [Test] + public void Can_RenameKey() + { + Redis.SetValue("oldkey", "val"); + Redis.Rename("oldkey", "newkey"); - [Test] - public void Can_Set_and_Get_key_with_all_byte_values() - { - const string key = "bytesKey"; + Assert.That(Redis.ContainsKey("oldkey"), Is.False); + Assert.That(Redis.ContainsKey("newkey"), Is.True); + } - var value = new byte[256]; - for (var i = 0; i < value.Length; i++) - { - value[i] = (byte)i; - } + [Test] + public void Can_Expire() + { + Redis.SetValue("key", "val"); + Redis.Expire("key", 1); + Assert.That(Redis.ContainsKey("key"), Is.True); + Thread.Sleep(2000); + Assert.That(Redis.ContainsKey("key"), Is.False); + } - Redis.Set(key, value); - var resultValue = Redis.Get(key); + [Test] + public void Can_Expire_Ms() + { + Redis.SetValue("key", "val"); + Redis.ExpireEntryIn("key", TimeSpan.FromMilliseconds(100)); + Assert.That(Redis.ContainsKey("key"), Is.True); + Thread.Sleep(500); + Assert.That(Redis.ContainsKey("key"), Is.False); + } - Assert.That(resultValue, Is.EquivalentTo(value)); - } + [Ignore("Changes in system clock can break test")] + [Test] + public void Can_ExpireAt() + { + Redis.SetValue("key", "val"); - [Test] - public void GetKeys_returns_matching_collection() - { - Redis.Set("ss-tests:a1", "One"); - Redis.Set("ss-tests:a2", "One"); - Redis.Set("ss-tests:b3", "One"); + var unixNow = DateTime.Now.ToUnixTime(); + var in2Secs = unixNow + 2; - var matchingKeys = Redis.SearchKeys("ss-tests:a*"); + Redis.ExpireAt("key", in2Secs); - Assert.That(matchingKeys.Count, Is.EqualTo(2)); - } + Assert.That(Redis.ContainsKey("key"), Is.True); + Thread.Sleep(3000); + Assert.That(Redis.ContainsKey("key"), Is.False); + } - [Test] - public void GetKeys_on_non_existent_keys_returns_empty_collection() - { - var matchingKeys = Redis.SearchKeys("ss-tests:NOTEXISTS"); + [Test] + public void Can_GetTimeToLive() + { + Redis.SetValue("key", "val"); + Redis.Expire("key", 10); - Assert.That(matchingKeys.Count, Is.EqualTo(0)); - } + var 
ttl = Redis.GetTimeToLive("key"); + Assert.That(ttl.Value.TotalSeconds, Is.GreaterThanOrEqualTo(9)); + Thread.Sleep(1700); - [Test] - public void Can_get_Types() - { - Redis.SetEntry("string", "string"); - Redis.AddItemToList("list", "list"); - Redis.AddItemToSet("set", "set"); - Redis.AddItemToSortedSet("sortedset", "sortedset"); - Redis.SetEntryInHash("hash", "key", "val"); + ttl = Redis.GetTimeToLive("key"); + Assert.That(ttl.Value.TotalSeconds, Is.LessThanOrEqualTo(9)); + } - Assert.That(Redis.GetEntryType("nokey"), Is.EqualTo(RedisKeyType.None)); - Assert.That(Redis.GetEntryType("string"), Is.EqualTo(RedisKeyType.String)); - Assert.That(Redis.GetEntryType("list"), Is.EqualTo(RedisKeyType.List)); - Assert.That(Redis.GetEntryType("set"), Is.EqualTo(RedisKeyType.Set)); - Assert.That(Redis.GetEntryType("sortedset"), Is.EqualTo(RedisKeyType.SortedSet)); - Assert.That(Redis.GetEntryType("hash"), Is.EqualTo(RedisKeyType.Hash)); - } + [Test] + public void Can_GetServerTime() + { + var now = Redis.GetServerTime(); - [Test] - public void Can_delete_keys() - { - Redis.SetEntry("key", "val"); + now.Kind.PrintDump(); + now.ToString("D").Print(); + now.ToString("T").Print(); - Assert.That(Redis.ContainsKey("key"), Is.True); + "UtcNow".Print(); + DateTime.UtcNow.ToString("D").Print(); + DateTime.UtcNow.ToString("T").Print(); - Redis.Del("key"); + Assert.That(now.Date, Is.EqualTo(DateTime.UtcNow.Date)); + } - Assert.That(Redis.ContainsKey("key"), Is.False); - - var keysMap = new Dictionary(); + [Test] + public void Can_Ping() + { + Assert.That(Redis.Ping(), Is.True); + } - 10.Times(i => keysMap.Add("key" + i, "val" + i)); - - Redis.SetAll(keysMap); - - 10.Times(i => Assert.That(Redis.ContainsKey("key" + i), Is.True)); - - Redis.Del(keysMap.Keys.ToList().ToArray()); - - 10.Times(i => Assert.That(Redis.ContainsKey("key" + i), Is.False)); - } - - [Test] - public void Can_get_RandomKey() - { - Redis.Db = 15; - var keysMap = new Dictionary(); - - 10.Times(i => keysMap.Add(Redis.NamespacePrefix + "key" + i, "val" + i)); - - Redis.SetAll(keysMap); - - var randKey = Redis.RandomKey(); - - Assert.That(keysMap.ContainsKey(randKey), Is.True); - } - - [Test] - public void Can_RenameKey() - { - Redis.SetEntry("oldkey", "val"); - Redis.Rename("oldkey", "newkey"); - - Assert.That(Redis.ContainsKey("oldkey"), Is.False); - Assert.That(Redis.ContainsKey("newkey"), Is.True); - } - - [Test] - public void Can_Expire() - { - Redis.SetEntry("key", "val"); - Redis.Expire("key", 1); - Assert.That(Redis.ContainsKey("key"), Is.True); - Thread.Sleep(2000); - Assert.That(Redis.ContainsKey("key"), Is.False); - } - - [Test] - public void Can_ExpireAt() - { - Redis.SetEntry("key", "val"); - - var unixNow = DateTime.Now.ToUnixTime(); - var in1Sec = unixNow + 1; - - Redis.ExpireAt("key", in1Sec); + [Test] + public void Can_Echo() + { + Assert.That(Redis.Echo("Hello"), Is.EqualTo("Hello")); + } - Assert.That(Redis.ContainsKey("key"), Is.True); - Thread.Sleep(2000); - Assert.That(Redis.ContainsKey("key"), Is.False); - } - - [Test] - public void Can_GetTimeToLive() - { - Redis.SetEntry("key", "val"); - Redis.Expire("key", 10); - - var ttl = Redis.GetTimeToLive("key"); - Assert.That(ttl.TotalSeconds, Is.GreaterThanOrEqualTo(9)); - Thread.Sleep(1700); - - ttl = Redis.GetTimeToLive("key"); - Assert.That(ttl.TotalSeconds, Is.LessThanOrEqualTo(9)); - } - - [Test] - public void Can_Ping() - { - Assert.That(Redis.Ping(), Is.True); - } + [Test] + public void Can_SlaveOfNoOne() + { + Redis.SlaveOfNoOne(); + } - [Test] - public void Can_Echo() - 
{ - Assert.That(Redis.Echo("Hello"), Is.EqualTo("Hello")); - } + [Test] + public void Can_Save() + { + try + { + Redis.Save(); + } + catch (RedisResponseException e) + { + // if exception has that message then it still proves that BgSave works as expected. + if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress") + || e.Message.StartsWith("An AOF log rewriting in progress: can't BGSAVE right now") + || e.Message.StartsWith("Background save already in progress") + || e.Message.StartsWith("Another child process is active (AOF?): can't BGSAVE right now")) + return; - [Test] - public void Can_SlaveOfNoOne() - { - Redis.SlaveOfNoOne(); - } + throw; + } + } - [Test] - public void Can_Save() - { - Redis.Save(); - } - - [Test] - public void Can_BgSave() - { + [Test] + public void Can_BgSave() + { try { Redis.BgSave(); - } - catch(RedisResponseException e) + } + catch (RedisResponseException e) { // if exception has that message then it still proves that BgSave works as expected. - if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress")) return; + if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress") + || e.Message.StartsWith("An AOF log rewriting in progress: can't BGSAVE right now") + || e.Message.StartsWith("Background save already in progress") + || e.Message.StartsWith("Another child process is active (AOF?): can't BGSAVE right now")) + return; + throw; } - } - - [Test] - public void Can_Quit() - { - Redis.Quit(); - Redis.NamespacePrefix = null; - CleanMask = null; - } - - [Test] - public void Can_BgRewriteAof() - { - Redis.BgRewriteAof(); - } - - [Test] - [Ignore("Works too well and shutdown the server")] - public void Can_Shutdown() - { - Redis.Shutdown(); - } - - [Test] - public void Can_get_Keys_with_pattern() - { - 5.Times(i => Redis.SetEntry("k1:" + i, "val")); - 5.Times(i => Redis.SetEntry("k2:" + i, "val")); - - var keys = Redis.Keys("k1:*"); - Assert.That(keys.Length, Is.EqualTo(5)); - } - - [Test] - public void Can_GetAll() - { - var keysMap = new Dictionary(); - - 10.Times(i => keysMap.Add("key" + i, "val" + i)); - - Redis.SetAll(keysMap); - - var map = Redis.GetAll(keysMap.Keys); - var mapKeys = Redis.GetValues(keysMap.Keys.ToList()); - - foreach (var entry in keysMap) - { - Assert.That(map.ContainsKey(entry.Key), Is.True); - Assert.That(mapKeys.Contains(entry.Value), Is.True); - } - } - - [Test] - public void Can_GetValues_JSON_strings() - { - var val = 
"{\"AuthorId\":0,\"Created\":\"\\/Date(1345961754013)\\/\",\"Name\":\"test\",\"Base64\":\"BQELAAEBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP8BWAFYAViA/wFYAVgBWID/AVgBWAFYgP8BWAFYAViA/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWID/gP+A/wFYgP+A/4D/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/AViA/4D/AVgBWAFYgP8BWAFYAViA/4D/AViA/4D/gP+A/4D/gP8BWAFYgP+A/wFYgP+A/wFYgP+A/4D/gP+A/wFYgP+A/wFYgP+A/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWAFYAViA/wFYAVgBWID/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"}"; - - Redis.SetEntry("UserLevel/1", val); + } + + [Test] + public void Can_Quit() + { + Redis.Quit(); + Redis.NamespacePrefix = null; + CleanMask = null; + } + + [Test] + public void Can_BgRewriteAof() + { + Redis.BgRewriteAof(); + } + + [Test] + [Ignore("Works too well and shutdown the server")] + public void Can_Shutdown() + { + Redis.Shutdown(); + } + + [Test] + public void Can_get_Keys_with_pattern() + { + 5.Times(i => Redis.SetValue("k1:" + i, "val")); + 5.Times(i => Redis.SetValue("k2:" + i, "val")); + + var keys = Redis.Keys("k1:*"); + Assert.That(keys.Length, Is.EqualTo(5)); + + var scanKeys = Redis.ScanAllKeys("k1:*").ToArray(); + Assert.That(scanKeys.Length, Is.EqualTo(5)); + } + + [Test] + public void Can_GetAll() + { + var keysMap = new Dictionary(); + + 10.Times(i => keysMap.Add("key" + i, "val" + i)); + + Redis.SetAll(keysMap); + + var map = Redis.GetAll(keysMap.Keys); + var mapKeys = Redis.GetValues(keysMap.Keys.ToList()); + + foreach (var entry in keysMap) + { + Assert.That(map.ContainsKey(entry.Key), Is.True); + Assert.That(mapKeys.Contains(entry.Value), Is.True); + } + } + + [Test] + public void Can_GetValues_JSON_strings() + { + var val = "{\"AuthorId\":0,\"Created\":\"\\/Date(1345961754013)\\/\",\"Name\":\"test\",\"Base64\":\"BQELAAEBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP8BWAFYAViA/wFYAVgBWID/AVgBWAFYgP8BWAFYAViA/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWID/gP+A/wFYgP+A/4D/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/AViA/4D/AVgBWAFYgP8BWAFYAViA/4D/AViA/4D/gP+A/4D/gP8BWAFYgP+A/wFYgP+A/wFYgP+A/4D/gP+A/wFYgP+A/wFYgP+A/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWAFYAViA/wFYAVgBWID/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"}"; + + 
Redis.SetValue("UserLevel/1", val); var vals = Redis.GetValues(new List(new[] { "UserLevel/1" })); Assert.That(vals.Count, Is.EqualTo(1)); Assert.That(vals[0], Is.EqualTo(val)); - } + } - [Test] - public void Can_AcquireLock() - { - var key = PrefixedKey("AcquireLockKey"); - var lockKey = PrefixedKey("Can_AcquireLock"); + [Test] + public void Can_AcquireLock() + { + // guid here is to prevent competition between concurrent runtime tests + var key = PrefixedKey("AcquireLockKeyTimeOut:" + Guid.NewGuid()); + var lockKey = PrefixedKey("Can_AcquireLock_TimeOut:" + Guid.NewGuid()); Redis.IncrementValue(key); //1 - var asyncResults = 5.TimesAsync(i => - IncrementKeyInsideLock(key, lockKey, i, new RedisClient(TestConfig.SingleHost) { NamespacePrefix = Redis.NamespacePrefix })); + var asyncResults = 5.TimesAsync(i => + IncrementKeyInsideLock(key, lockKey, i, new RedisClient(TestConfig.SingleHost) { NamespacePrefix = Redis.NamespacePrefix })); - asyncResults.WaitAll(TimeSpan.FromSeconds(5)); + asyncResults.WaitAll(TimeSpan.FromSeconds(5)); var val = Redis.Get(key); - Assert.That(val, Is.EqualTo(1 + 5)); - } + Assert.That(val, Is.EqualTo(1 + 5)); + } - private void IncrementKeyInsideLock(String key, String lockKey, int clientNo, IRedisClient client) - { + private void IncrementKeyInsideLock(String key, String lockKey, int clientNo, IRedisClient client) + { using (client.AcquireLock(lockKey)) - { + { Debug.WriteLine(String.Format("client {0} acquired lock", clientNo)); var val = client.Get(key); - Thread.Sleep(200); + Thread.Sleep(200); - client.Set(key, val + 1); - Debug.WriteLine(String.Format("client {0} released lock", clientNo)); - } - } + client.Set(key, val + 1); + Debug.WriteLine(String.Format("client {0} released lock", clientNo)); + } + } - [Test] - public void Can_AcquireLock_TimeOut() - { - var key = PrefixedKey("AcquireLockKeyTimeOut"); - var lockKey = PrefixedKey("Can_AcquireLock_TimeOut"); + [Test] + public void Can_AcquireLock_TimeOut() + { + // guid here is to prevent competition between concurrent runtime tests + var key = PrefixedKey("AcquireLockKeyTimeOut:" + Guid.NewGuid()); + var lockKey = PrefixedKey("Can_AcquireLock_TimeOut:" + Guid.NewGuid()); Redis.IncrementValue(key); //1 var acquiredLock = Redis.AcquireLock(lockKey); - var waitFor = TimeSpan.FromMilliseconds(1000); - var now = DateTime.Now; - - try - { - using (var client = new RedisClient(TestConfig.SingleHost)) - { - using (client.AcquireLock(lockKey, waitFor)) - { + var waitFor = TimeSpan.FromMilliseconds(1000); + var now = DateTime.Now; + + try + { + using (var client = new RedisClient(TestConfig.SingleHost)) + { + using (client.AcquireLock(lockKey, waitFor)) + { Redis.IncrementValue(key); //2 - } - } - } - catch (TimeoutException tex) - { + } + } + } + catch (TimeoutException) + { var val = Redis.Get(key); - Assert.That(val, Is.EqualTo(1)); + Assert.That(val, Is.EqualTo(1)); - var timeTaken = DateTime.Now - now; - Assert.That(timeTaken.TotalMilliseconds > waitFor.TotalMilliseconds, Is.True); - Assert.That(timeTaken.TotalMilliseconds < waitFor.TotalMilliseconds + 1000, Is.True); - return; - } - Assert.Fail("should have Timed out"); - } + var timeTaken = DateTime.Now - now; + Assert.That(timeTaken.TotalMilliseconds > waitFor.TotalMilliseconds, Is.True); + Assert.That(timeTaken.TotalMilliseconds < waitFor.TotalMilliseconds + 1000, Is.True); + return; + } + finally + { + Redis.Remove(key); + Redis.Remove(lockKey); + } + Assert.Fail("should have Timed out"); + } - [Test] - public void Can_Append() - { - const string 
expectedString = "Hello, " + "World!"; - Redis.SetEntry("key", "Hello, "); - var currentLength = Redis.AppendToValue("key", "World!"); + [Test] + public void Can_Append() + { + const string expectedString = "Hello, " + "World!"; + Redis.SetValue("key", "Hello, "); + var currentLength = Redis.AppendToValue("key", "World!"); - Assert.That(currentLength, Is.EqualTo(expectedString.Length)); + Assert.That(currentLength, Is.EqualTo(expectedString.Length)); - var val = Redis.GetValue("key"); - Assert.That(val, Is.EqualTo(expectedString)); - } + var val = Redis.GetValue("key"); + Assert.That(val, Is.EqualTo(expectedString)); + } - [Test] - public void Can_Substring() - { - const string helloWorld = "Hello, World!"; - Redis.SetEntry("key", helloWorld); + [Test] + public void Can_GetRange() + { + const string helloWorld = "Hello, World!"; + Redis.SetValue("key", helloWorld); - var fromIndex = "Hello, ".Length; - var toIndex = "Hello, World".Length - 1; + var fromIndex = "Hello, ".Length; + var toIndex = "Hello, World".Length - 1; - var expectedString = helloWorld.Substring(fromIndex, toIndex - fromIndex + 1); - var world = Redis.GetSubstring("key", fromIndex, toIndex); + var expectedString = helloWorld.Substring(fromIndex, toIndex - fromIndex + 1); + var world = Redis.GetRange("key", fromIndex, toIndex); - Assert.That(world.Length, Is.EqualTo(expectedString.Length)); - } + Assert.That(world.Length, Is.EqualTo(expectedString.Length)); + } [Test] public void Can_create_distributed_lock() @@ -447,12 +504,13 @@ public void Can_store_multiple_keys() var keys = 5.Times(x => "key" + x); var vals = 5.Times(x => "val" + x); - var redis = RedisClient.New(); - - redis.SetAll(keys, vals); + using (var redis = RedisClient.New()) + { + redis.SetAll(keys, vals); - var all = redis.GetValues(keys); - Assert.AreEqual(vals, all); + var all = redis.GetValues(keys); + Assert.AreEqual(vals, all); + } } [Test] @@ -463,12 +521,13 @@ public void Can_store_Dictionary() var map = new Dictionary(); keys.ForEach(x => map[x] = "val" + x); - var redis = RedisClient.New(); - - redis.SetAll(map); + using (var redis = RedisClient.New()) + { + redis.SetAll(map); - var all = redis.GetValuesMap(keys); - Assert.AreEqual(map, all); + var all = redis.GetValuesMap(keys); + Assert.AreEqual(map, all); + } } [Test] @@ -478,11 +537,13 @@ public void Can_store_Dictionary_as_objects() map["key_a"] = "123"; map["key_b"] = null; - var redis = RedisClient.New(); - redis.SetAll(map); + using (var redis = RedisClient.New()) + { + redis.SetAll(map); - Assert.That(redis.Get("key_a"), Is.EqualTo("123")); - Assert.That(redis.Get("key_b"), Is.EqualTo("")); + Assert.That(redis.Get("key_a"), Is.EqualTo("123")); + Assert.That(redis.Get("key_b"), Is.EqualTo("")); + } } @@ -493,36 +554,117 @@ public void Can_store_Dictionary_as_bytes() map["key_a"] = "123".ToUtf8Bytes(); map["key_b"] = null; - var redis = RedisClient.New(); - redis.SetAll(map); + using (var redis = RedisClient.New()) + { + redis.SetAll(map); - Assert.That(redis.Get("key_a"), Is.EqualTo("123")); - Assert.That(redis.Get("key_b"), Is.EqualTo("")); + Assert.That(redis.Get("key_a"), Is.EqualTo("123")); + Assert.That(redis.Get("key_b"), Is.EqualTo("")); + } } [Test] public void Should_reset_slowlog() { - var redis = RedisClient.New(); - redis.SlowlogReset(); + using (var redis = RedisClient.New()) + { + redis.SlowlogReset(); + } } [Test] - public void Can_get_showlog() + public void Can_get_slowlog() { - var redis = RedisClient.New(); - var log = redis.GetSlowlog(10); + using (var redis = 
RedisClient.New()) + { + var log = redis.GetSlowlog(10); + + foreach (var t in log) + { + Console.WriteLine(t.Id); + Console.WriteLine(t.Duration); + Console.WriteLine(t.Timestamp); + Console.WriteLine(string.Join(":", t.Arguments)); + } + } + } + - foreach (var t in log) + [Test] + public void Can_change_db_at_runtime() + { + using (var redis = new RedisClient(TestConfig.SingleHost, TestConfig.RedisPort, db: 1)) { - Console.WriteLine(t.Id); - Console.WriteLine(t.Duration); - Console.WriteLine(t.Timestamp); - Console.WriteLine(string.Join(":", t.Arguments)); + var val = Environment.TickCount; + var key = "test" + val; + try + { + redis.Set(key, val); + redis.ChangeDb(2); + Assert.That(redis.Get(key), Is.EqualTo(0)); + redis.ChangeDb(1); + Assert.That(redis.Get(key), Is.EqualTo(val)); + redis.Dispose(); + } + finally + { + redis.ChangeDb(1); + redis.Del(key); + } } } + [Test] + public void Can_Set_Expire_Seconds() + { + Redis.SetValue("key", "val", expireIn: TimeSpan.FromSeconds(1)); + Assert.That(Redis.ContainsKey("key"), Is.True); + Thread.Sleep(2000); + Assert.That(Redis.ContainsKey("key"), Is.False); + } + + [Test] + public void Can_Set_Expire_MilliSeconds() + { + Redis.SetValue("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); + Assert.That(Redis.ContainsKey("key"), Is.True); + Thread.Sleep(2000); + Assert.That(Redis.ContainsKey("key"), Is.False); + } - } + [Test] + public void Can_Set_Expire_Seconds_if_exists() + { + Assert.That(Redis.SetValueIfExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1500)), + Is.False); + Assert.That(Redis.ContainsKey("key"), Is.False); + + Redis.SetValue("key", "val"); + Assert.That(Redis.SetValueIfExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Is.True); + Assert.That(Redis.ContainsKey("key"), Is.True); + + Thread.Sleep(2000); + Assert.That(Redis.ContainsKey("key"), Is.False); + } + + [Test] + public void Can_Set_Expire_Seconds_if_not_exists() + { + Assert.That(Redis.SetValueIfNotExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Is.True); + Assert.That(Redis.ContainsKey("key"), Is.True); + + Assert.That(Redis.SetValueIfNotExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Is.False); + + Thread.Sleep(2000); + Assert.That(Redis.ContainsKey("key"), Is.False); + + Redis.Remove("key"); + Redis.SetValueIfNotExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); + Assert.That(Redis.ContainsKey("key"), Is.True); + } + } } diff --git a/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.Async.cs new file mode 100644 index 00000000..40d56a42 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.Async.cs @@ -0,0 +1,126 @@ +using NUnit.Framework; +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + public static class AsyncExtensions + { + public static async ValueTask ForEachAsync(this List source, Func action) + { + foreach (var item in source) + await action(item).ConfigureAwait(false); + } + public static async ValueTask ForEachAsync(this Dictionary source, Func action) + { + foreach (var item in source) + await action(item.Key, item.Value).ConfigureAwait(false); + } + public static async ValueTask TimesAsync(this int times, Func action) + { + for (int i = 0; i < times; i++) + { + await action(i).ConfigureAwait(false); + } + } + public static async ValueTask> ToListAsync(this IAsyncEnumerable source) + { + var 
list = new List(); + await foreach (var item in source.ConfigureAwait(false)) + list.Add(item); + return list; + } + + public static async ValueTask CountAsync(this IAsyncEnumerable source) + { + int count = 0; + await foreach (var item in source.ConfigureAwait(false)) + count++; + return count; + } + + public static IRedisClientAsync ForAsyncOnly(this RedisClient client) + { +#if DEBUG + if (client is object) client.DebugAllowSync = false; +#endif + return client; + } + + public static async IAsyncEnumerable TakeAsync(this IAsyncEnumerable source, int count) + { + await foreach (var item in source.ConfigureAwait(false)) + { + if (count > 0) + { + count--; + yield return item; + } + } + } + + public static async ValueTask> ToDictionaryAsync(this IAsyncEnumerable source, Func keySelector, Func valueSelector) + { + var result = new Dictionary(); + await foreach (var item in source.ConfigureAwait(false)) + { + result.Add(keySelector(item), valueSelector(item)); + } + return result; + } + } + public class RedisClientTestsBaseAsyncTests // testing the base class features + : RedisClientTestsBaseAsync + { + [Test] + public void DetectUnexpectedSync() + { + #if DEBUG + Assert.False(RedisRaw.DebugAllowSync, nameof(RedisRaw.DebugAllowSync)); + var ex = Assert.Throws(() => RedisRaw.Ping()); + Assert.AreEqual("Unexpected synchronous operation detected from 'SendReceive'", ex.Message); + #endif + } + } + + [Category("Async")] + public abstract class RedisClientTestsBaseAsync : RedisClientTestsBase + { + protected IRedisClientAsync RedisAsync => base.Redis; + protected IRedisNativeClientAsync NativeAsync => base.Redis; + + [Obsolete("This should use RedisAsync or RedisRaw", true)] + protected new RedisClient Redis => base.Redis; + + protected RedisClient RedisRaw + { + get => base.Redis; + set => base.Redis = value; + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + _ = RedisRaw.ForAsyncOnly(); + } + public override void OnAfterEachTest() + { +#if DEBUG + if(RedisRaw is object) RedisRaw.DebugAllowSync = true; +#endif + base.OnAfterEachTest(); + } + + protected static async ValueTask> ToListAsync(IAsyncEnumerable source, CancellationToken token = default) + { + var list = new List(); + await foreach (var value in source.ConfigureAwait(false).WithCancellation(token)) + { + list.Add(value); + } + return list; + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.cs b/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.cs index dc52dc21..087c1c4f 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.cs @@ -1,33 +1,57 @@ -using System; +using NUnit.Framework; using System.Diagnostics; using System.Text; -using NUnit.Framework; namespace ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class RedisClientTestsBase - { - protected string CleanMask = null; - protected RedisClient Redis; - - protected void Log(string fmt, params object[] args) - { - Debug.WriteLine("{0}", string.Format(fmt, args).Trim()); - } - - [SetUp] - public virtual void OnBeforeEachTest() - { - Redis = new RedisClient(TestConfig.SingleHost); - } + public class RedisClientTestsBase + { + protected string CleanMask = null; + protected RedisClient Redis; + + protected void Log(string fmt, params object[] args) + { + Debug.WriteLine("{0}", string.Format(fmt, args).Trim()); + } + + [OneTimeSetUp] + public virtual void OnBeforeTestFixture() + { + 
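// Illustrative note, not part of this patch (and assuming RedisClient.New() resolves through
// NewFactoryFn): pointing the factory at TestConfig.SingleHost below means every
// RedisClient.New() call in these fixtures connects to the configured test host, e.g.:
//
//     RedisClient.NewFactoryFn = () => new RedisClient(TestConfig.SingleHost);
//     using var redis = RedisClient.New(); // created via the factory above
//     redis.FlushAll();                    // start each fixture from an empty db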
RedisClient.NewFactoryFn = () => new RedisClient(TestConfig.SingleHost); + using (var redis = RedisClient.New()) + { + redis.FlushAll(); + } + } + + [OneTimeTearDown] + public virtual void OnAfterTestFixture() + { + } + + [SetUp] + public virtual void OnBeforeEachTest() + { + Redis = RedisClient.New(); + } [TearDown] - public virtual void TearDown() + public virtual void OnAfterEachTest() { - if (Redis.NamespacePrefix != null && CleanMask == null) CleanMask = Redis.NamespacePrefix + "*"; - if (CleanMask != null) Redis.SearchKeys(CleanMask).ForEach(t => Redis.Del(t)); - Redis.Dispose(); + try + { + if (Redis.NamespacePrefix != null && CleanMask == null) CleanMask = Redis.NamespacePrefix + "*"; + if (CleanMask != null) Redis.SearchKeys(CleanMask).ForEach(t => Redis.Del(t)); + Redis.Dispose(); + } + catch (RedisResponseException e) + { + // if exception has that message then it still proves that BgSave works as expected. + if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress")) + return; + + throw; + } } protected string PrefixedKey(string key) @@ -35,26 +59,26 @@ protected string PrefixedKey(string key) return string.Concat(Redis.NamespacePrefix, key); } - public RedisClient GetRedisClient() - { - var client = new RedisClient(TestConfig.SingleHost); - return client; - } - - public RedisClient CreateRedisClient() - { - var client = new RedisClient(TestConfig.SingleHost); - return client; - } - - public string GetString(byte[] stringBytes) - { - return Encoding.UTF8.GetString(stringBytes); - } - - public byte[] GetBytes(string stringValue) - { - return Encoding.UTF8.GetBytes(stringValue); - } - } + public RedisClient GetRedisClient() + { + var client = new RedisClient(TestConfig.SingleHost); + return client; + } + + public RedisClient CreateRedisClient() + { + var client = new RedisClient(TestConfig.SingleHost); + return client; + } + + public string GetString(byte[] stringBytes) + { + return Encoding.UTF8.GetString(stringBytes); + } + + public byte[] GetBytes(string stringValue) + { + return Encoding.UTF8.GetBytes(stringValue); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientsManagerExtensionsTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientsManagerExtensionsTests.cs index 8cea3aa6..804681a2 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientsManagerExtensionsTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientsManagerExtensionsTests.cs @@ -1,156 +1,156 @@ using System.Collections.Generic; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; using ServiceStack.Common.Tests.Models; using ServiceStack.Redis.Generic; namespace ServiceStack.Redis.Tests { - [TestFixture] - public class RedisClientsManagerExtensionsTests - { - private IRedisClientsManager redisManager; - - [SetUp] - public void OnBeforeEachTest() - { - if (redisManager != null) redisManager.Dispose(); - redisManager = new BasicRedisClientManager(TestConfig.SingleHost); - redisManager.Exec(r => r.FlushAll()); - } - - [Test] - public void Can_Exec_Action() - { - redisManager.Exec(r => - { - r.Increment("key", 1); - Assert.That(r.Get("key"), Is.EqualTo(1)); - }); - } - - [Test] - public void Can_Exec_Func_string() - { - string value = redisManager.Exec(r => - { - r.SetEntry("key", "value"); - return r.GetValue("key"); - }); - Assert.That(value, Is.EqualTo("value")); - } - - [Test] - public void Can_Exec_Func_long() - { - long value = redisManager.Exec(r => r.Increment("key", 1)); - Assert.That(value, Is.EqualTo(1)); 
- } - - [Test] - public void Can_Exec_Func_int() - { - int value = redisManager.Exec(r => - { - r.AddItemToList("list", "value"); - return r.GetListCount("list"); - }); - Assert.That(value, Is.EqualTo(1)); - } - - [Test] - public void Can_Exec_Func_double() - { - double value = redisManager.Exec(r => - { - r.AddItemToSortedSet("zset", "value", 1.1d); - return r.GetItemScoreInSortedSet("zset", "value"); - }); - - Assert.That(value, Is.EqualTo(1.1d)); - } - - [Test] - public void Can_Exec_Func_bool() - { - bool value = redisManager.Exec(r => - { - r.AddItemToSet("set", "item"); - return r.SetContainsItem("set", "item"); - }); - - Assert.That(value, Is.True); - } - - [Test] - public void Can_Exec_Transaction_Action() - { - var value = false; - redisManager.ExecTrans(trans => - { - trans.QueueCommand(r => r.AddItemToSet("set", "item")); - trans.QueueCommand(r => r.SetContainsItem("set", "item"), x => value = x); - }); - - Assert.That(value, Is.True); - } - - [Test] - public void Can_ExecAs_ModelWithIdAndName_Action() - { - var expected = ModelWithIdAndName.Create(1); - redisManager.ExecAs(m => - { - m.Store(expected); - var actual = m.GetById(expected.Id); - Assert.That(actual, Is.EqualTo(expected)); - }); - } - - [Test] - public void Can_ExecAs_ModelWithIdAndName_Func() - { - var expected = ModelWithIdAndName.Create(1); - ModelWithIdAndName actual = redisManager.ExecAs(m => - { - m.Store(expected); - return m.GetById(expected.Id); - }); - Assert.That(actual, Is.EqualTo(expected)); - } - - [Test] - public void Can_ExecAs_ModelWithIdAndName_Func_IList() - { - var expected = new[] { - ModelWithIdAndName.Create(1), - ModelWithIdAndName.Create(2), - ModelWithIdAndName.Create(3), - }; - IList actual = redisManager.ExecAs(m => - { - var list = m.Lists["typed-list"]; - list.AddRange(expected); - return (IList)list.GetAll(); - }); - Assert.That(actual.EquivalentTo(expected)); - } - - [Test] - public void Can_ExecAs_ModelWithIdAndName_Func_List() - { - var expected = new[] { - ModelWithIdAndName.Create(1), - ModelWithIdAndName.Create(2), - ModelWithIdAndName.Create(3), - }; - List actual = redisManager.ExecAs(m => - { - var list = m.Lists["typed-list"]; - list.AddRange(expected); - return list.GetAll(); - }); - Assert.That(actual.EquivalentTo(expected)); - } - } + [TestFixture] + public class RedisClientsManagerExtensionsTests + { + private IRedisClientsManager redisManager; + + [SetUp] + public void OnBeforeEachTest() + { + if (redisManager != null) redisManager.Dispose(); + redisManager = TestConfig.BasicClientManger; + redisManager.Exec(r => r.FlushAll()); + } + + [Test] + public void Can_Exec_Action() + { + redisManager.Exec(r => + { + r.Increment("key", 1); + Assert.That(r.Get("key"), Is.EqualTo(1)); + }); + } + + [Test] + public void Can_Exec_Func_string() + { + string value = redisManager.Exec(r => + { + r.SetValue("key", "value"); + return r.GetValue("key"); + }); + Assert.That(value, Is.EqualTo("value")); + } + + [Test] + public void Can_Exec_Func_long() + { + long value = redisManager.Exec(r => r.Increment("key", 1)); + Assert.That(value, Is.EqualTo(1)); + } + + [Test] + public void Can_Exec_Func_int() + { + long value = redisManager.Exec(r => + { + r.AddItemToList("list", "value"); + return r.GetListCount("list"); + }); + Assert.That(value, Is.EqualTo(1)); + } + + [Test] + public void Can_Exec_Func_double() + { + double value = redisManager.Exec(r => + { + r.AddItemToSortedSet("zset", "value", 1.1d); + return r.GetItemScoreInSortedSet("zset", "value"); + }); + + Assert.That(value, 
Is.EqualTo(1.1d)); + } + + [Test] + public void Can_Exec_Func_bool() + { + bool value = redisManager.Exec(r => + { + r.AddItemToSet("set", "item"); + return r.SetContainsItem("set", "item"); + }); + + Assert.That(value, Is.True); + } + + [Test] + public void Can_Exec_Transaction_Action() + { + var value = false; + redisManager.ExecTrans(trans => + { + trans.QueueCommand(r => r.AddItemToSet("set", "item")); + trans.QueueCommand(r => r.SetContainsItem("set", "item"), x => value = x); + }); + + Assert.That(value, Is.True); + } + + [Test] + public void Can_ExecAs_ModelWithIdAndName_Action() + { + var expected = ModelWithIdAndName.Create(1); + redisManager.ExecAs(m => + { + m.Store(expected); + var actual = m.GetById(expected.Id); + Assert.That(actual, Is.EqualTo(expected)); + }); + } + + [Test] + public void Can_ExecAs_ModelWithIdAndName_Func() + { + var expected = ModelWithIdAndName.Create(1); + ModelWithIdAndName actual = redisManager.ExecAs(m => + { + m.Store(expected); + return m.GetById(expected.Id); + }); + Assert.That(actual, Is.EqualTo(expected)); + } + + [Test] + public void Can_ExecAs_ModelWithIdAndName_Func_IList() + { + var expected = new[] { + ModelWithIdAndName.Create(1), + ModelWithIdAndName.Create(2), + ModelWithIdAndName.Create(3), + }; + IList actual = redisManager.ExecAs(m => + { + var list = m.Lists["typed-list"]; + list.AddRange(expected); + return (IList)list.GetAll(); + }); + Assert.That(actual.EquivalentTo(expected)); + } + + [Test] + public void Can_ExecAs_ModelWithIdAndName_Func_List() + { + var expected = new[] { + ModelWithIdAndName.Create(1), + ModelWithIdAndName.Create(2), + ModelWithIdAndName.Create(3), + }; + List actual = redisManager.ExecAs(m => + { + var list = m.Lists["typed-list"]; + list.AddRange(expected); + return list.GetAll(); + }); + Assert.That(actual.EquivalentTo(expected)); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisExtensionTests.cs b/tests/ServiceStack.Redis.Tests/RedisExtensionTests.cs index 11d88924..da0c5e3d 100644 --- a/tests/ServiceStack.Redis.Tests/RedisExtensionTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisExtensionTests.cs @@ -12,7 +12,7 @@ public class RedisExtensionTests [Test] public void Can_Parse_Host() { - var hosts = new[] {"pass@host.com:6123"}; + var hosts = new[] { "pass@host.com:6123" }; var endPoints = hosts.ToRedisEndPoints(); Assert.AreEqual(1, endPoints.Count); @@ -26,7 +26,7 @@ public void Can_Parse_Host() [Test] public void Host_May_Contain_AtChar() { - var hosts = new[] {"@pa1@ss@localhost:6123"}; + var hosts = new[] { "@pa1@ss@localhost:6123" }; var endPoints = hosts.ToRedisEndPoints(); Assert.AreEqual(1, endPoints.Count); diff --git a/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.Async.cs new file mode 100644 index 00000000..c6f267c3 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.Async.cs @@ -0,0 +1,286 @@ +using NUnit.Framework; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Async")] + [Ignore("CI requires redis-server v3.2.0")] + public class RedisGeoNativeClientTestsAsync + { + private readonly IRedisNativeClientAsync redis; + + public RedisGeoNativeClientTestsAsync() + { + redis = new RedisNativeClient(TestConfig.GeoHost); + } + + [OneTimeTearDown] + public async Task OneTimeTearDown() + { + await redis.DisposeAsync(); + } + + [Test] + public async Task Can_GeoAdd_and_GeoPos() + { + await redis.FlushDbAsync(); 
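// Illustrative sketch, not part of this patch, using only the APIs exercised below:
// GEOADD takes longitude before latitude, and the returned count only includes newly
// added members, so re-adding an existing member returns 0:
//
//     await redis.GeoAddAsync("Sicily", 13.361389, 38.115556, "Palermo"); // 1 on first add
//     var pos = await redis.GeoPosAsync("Sicily", new[] { "Palermo" });
//     // Positions come back from Redis's geohash encoding, hence the Within(.1)
//     // tolerance used by the assertions in this fixture.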
+ var count = await redis.GeoAddAsync("Sicily", 13.361389, 38.115556, "Palermo"); + Assert.That(count, Is.EqualTo(1)); + var results = await redis.GeoPosAsync("Sicily", new[] { "Palermo" }); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + } + + [Test] + public async Task GeoPos_on_NonExistingMember_returns_no_results() + { + await redis.FlushDbAsync(); + var count = await redis.GeoAddAsync("Sicily", 13.361389, 38.115556, "Palermo"); + var results = await redis.GeoPosAsync("Sicily", new[] { "NonExistingMember" }); + Assert.That(results.Count, Is.EqualTo(0)); + + results = await redis.GeoPosAsync("Sicily", new[] { "Palermo", "NonExistingMember" }); + Assert.That(results.Count, Is.EqualTo(1)); + } + + [Test] + public async Task Can_GeoAdd_and_GeoPos_multiple() + { + await redis.FlushDbAsync(); + var count = await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + Assert.That(count, Is.EqualTo(2)); + + var results = await redis.GeoPosAsync("Sicily", new[] { "Palermo", "Catania" }); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Member, Is.EqualTo("Catania")); + } + + [Test] + public async Task Can_GeoDist() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var distance = await redis.GeoDistAsync("Sicily", "Palermo", "Catania"); + Assert.That(distance, Is.EqualTo(166274.15156960039).Within(.1)); + } + + [Test] + public async Task GeoDist_on_NonExistingMember_returns_NaN() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var distance = await redis.GeoDistAsync("Sicily", "Palermo", "NonExistingMember"); + Assert.That(distance, Is.EqualTo(double.NaN)); + } + + [Test] + public async Task Can_GeoHash() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var hashes = await redis.GeoHashAsync("Sicily", new[] { "Palermo", "Catania" }); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.EqualTo("sqdtr74hyu0")); + + hashes = await redis.GeoHashAsync("Sicily", new[] { "Palermo", "NonExistingMember", "Catania" }); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.Null); + Assert.That(hashes[2], Is.EqualTo("sqdtr74hyu0")); + } + + [Test] + public async Task Can_GeoRadius_default() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + 
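// Illustrative sketch, not part of this patch: a plain GEORADIUS call only returns member
// names, which is why Unit is asserted as null below; the richer payload used by the
// later tests in this file is requested via the optional flags, e.g.:
//
//     var detailed = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers,
//         withCoords: true, withDist: true);
//     // detailed[0].Distance and detailed[0].Longitude are then populated.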
Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.Null); + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.Null); + } + + [Test] + public async Task Can_GeoRadiusByMember_default() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusByMemberAsync("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.Null); + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.Null); + } + + [Test] + public async Task Can_GeoRadius_WithCoord() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, withCoords: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + } + + [Test] + public async Task Can_GeoRadius_WithDist() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, withDist: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Distance, Is.EqualTo(56.4413).Within(.1)); + } + + [Test] + public async Task Can_GeoRadius_WithCoord_WithDist_WithHash() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + 
Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479447370796909)); + } + + [Test] + public async Task Can_GeoRadiusByMember_WithCoord_WithDist_WithHash() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusByMemberAsync("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.583333).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.316667).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(0)); + Assert.That(results[0].Hash, Is.EqualTo(3479030013248308)); + + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(90.9778).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479099956230698)); + } + + [Test] + public async Task Can_GeoRadius_WithCoord_WithDist_WithHash_Count_and_Asc() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true, count:1, asc:false); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true, count: 1, asc: true); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Catania")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479447370796909)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.cs b/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.cs new file mode 100644 index 00000000..bace21cf --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.cs @@ -0,0 +1,275 @@ +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + [Ignore("CI requires redis-server v3.2.0")] + public class 
RedisGeoNativeClientTests + { + private readonly RedisNativeClient redis; + + public RedisGeoNativeClientTests() + { + redis = new RedisNativeClient(TestConfig.GeoHost); + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { + redis.Dispose(); + } + + [Test] + public void Can_GeoAdd_and_GeoPos() + { + redis.FlushDb(); + var count = redis.GeoAdd("Sicily", 13.361389, 38.115556, "Palermo"); + Assert.That(count, Is.EqualTo(1)); + var results = redis.GeoPos("Sicily", "Palermo"); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + } + + [Test] + public void GeoPos_on_NonExistingMember_returns_no_results() + { + redis.FlushDb(); + var count = redis.GeoAdd("Sicily", 13.361389, 38.115556, "Palermo"); + var results = redis.GeoPos("Sicily", "NonExistingMember"); + Assert.That(results.Count, Is.EqualTo(0)); + + results = redis.GeoPos("Sicily", "Palermo", "NonExistingMember"); + Assert.That(results.Count, Is.EqualTo(1)); + } + + [Test] + public void Can_GeoAdd_and_GeoPos_multiple() + { + redis.FlushDb(); + var count = redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + Assert.That(count, Is.EqualTo(2)); + + var results = redis.GeoPos("Sicily", "Palermo", "Catania"); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Member, Is.EqualTo("Catania")); + } + + [Test] + public void Can_GeoDist() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var distance = redis.GeoDist("Sicily", "Palermo", "Catania"); + Assert.That(distance, Is.EqualTo(166274.15156960039).Within(.1)); + } + + [Test] + public void GeoDist_on_NonExistingMember_returns_NaN() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var distance = redis.GeoDist("Sicily", "Palermo", "NonExistingMember"); + Assert.That(distance, Is.EqualTo(double.NaN)); + } + + [Test] + public void Can_GeoHash() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var hashes = redis.GeoHash("Sicily", "Palermo", "Catania"); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.EqualTo("sqdtr74hyu0")); + + hashes = redis.GeoHash("Sicily", "Palermo", "NonExistingMember", "Catania"); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.Null); + Assert.That(hashes[2], Is.EqualTo("sqdtr74hyu0")); + } + + [Test] + public void Can_GeoRadius_default() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.GeoRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + 
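+ // No WITH* options were requested, so only the member names are populated and Unit stays null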
Assert.That(results[0].Unit, Is.Null); + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.Null); + } + + [Test] + public void Can_GeoRadiusByMember_default() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.GeoRadiusByMember("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.Null); + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.Null); + } + + [Test] + public void Can_GeoRadius_WithCoord() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.GeoRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, withCoords: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + } + + [Test] + public void Can_GeoRadius_WithDist() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.GeoRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, withDist: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Distance, Is.EqualTo(56.4413).Within(.1)); + } + + [Test] + public void Can_GeoRadius_WithCoord_WithDist_WithHash() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.GeoRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[1].Hash, 
Is.EqualTo(3479447370796909)); + } + + [Test] + public void Can_GeoRadiusByMember_WithCoord_WithDist_WithHash() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.GeoRadiusByMember("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.583333).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.316667).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(0)); + Assert.That(results[0].Hash, Is.EqualTo(3479030013248308)); + + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(90.9778).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479099956230698)); + } + + [Test] + public void Can_GeoRadius_WithCoord_WithDist_WithHash_Count_and_Asc() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.GeoRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true, count:1, asc:false); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + results = redis.GeoRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true, count: 1, asc: true); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Catania")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479447370796909)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisGeoTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisGeoTests.Async.cs new file mode 100644 index 00000000..f06280e2 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisGeoTests.Async.cs @@ -0,0 +1,242 @@ +using NUnit.Framework; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Async")] + [Ignore("CI requires redis-server v3.2.0")] + public class RedisGeoTestsAsync + { + private readonly IRedisClientAsync redis; + + public RedisGeoTestsAsync() + { + redis = new RedisClient(TestConfig.GeoHost); + } + + [OneTimeTearDown] + public async Task OneTimeTearDown() + { + if (redis is object) + { + await redis.DisposeAsync(); + } + } + + [Test] + public async Task 
Can_AddGeoMember_and_GetGeoCoordinates() + { + await redis.FlushDbAsync(); + var count = await redis.AddGeoMemberAsync("Sicily", 13.361389, 38.115556, "Palermo"); + Assert.That(count, Is.EqualTo(1)); + var results = await redis.GetGeoCoordinatesAsync("Sicily", new[] { "Palermo" }); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + } + + [Test] + public async Task GetGeoCoordinates_on_NonExistingMember_returns_no_results() + { + await redis.FlushDbAsync(); + var count = await redis.AddGeoMemberAsync("Sicily", 13.361389, 38.115556, "Palermo"); + var results = await redis.GetGeoCoordinatesAsync("Sicily", new[] { "NonExistingMember" }); + Assert.That(results.Count, Is.EqualTo(0)); + + results = await redis.GetGeoCoordinatesAsync("Sicily", new[] { "Palermo", "NonExistingMember" }); + Assert.That(results.Count, Is.EqualTo(1)); + } + + [Test] + public async Task Can_AddGeoMembers_and_GetGeoCoordinates_multiple() + { + await redis.FlushDbAsync(); + var count = await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + Assert.That(count, Is.EqualTo(2)); + + var results = await redis.GetGeoCoordinatesAsync("Sicily", new[] { "Palermo", "Catania" }); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Member, Is.EqualTo("Catania")); + } + + [Test] + public async Task Can_CalculateDistanceBetweenGeoMembers() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var distance = await redis.CalculateDistanceBetweenGeoMembersAsync("Sicily", "Palermo", "Catania"); + Assert.That(distance, Is.EqualTo(166274.15156960039).Within(.1)); + } + + [Test] + public async Task CalculateDistanceBetweenGeoMembers_on_NonExistingMember_returns_NaN() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var distance = await redis.CalculateDistanceBetweenGeoMembersAsync("Sicily", "Palermo", "NonExistingMember"); + Assert.That(distance, Is.EqualTo(double.NaN)); + } + + [Test] + public async Task Can_GetGeohashes() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var hashes = await redis.GetGeohashesAsync("Sicily", new[] { "Palermo", "Catania" }); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.EqualTo("sqdtr74hyu0")); + + hashes = await redis.GetGeohashesAsync("Sicily", new[] { "Palermo", "NonExistingMember", "Catania" }); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.Null); + Assert.That(hashes[2], Is.EqualTo("sqdtr74hyu0")); + } + + [Test] + public async Task Can_FindGeoMembersInRadius() + { + await 
redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.FindGeoMembersInRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers); + + Assert.That(results.Length, Is.EqualTo(2)); + Assert.That(results[0], Is.EqualTo("Palermo")); + Assert.That(results[1], Is.EqualTo("Catania")); + } + + //[Test] // method does not exist on IRedisClient/IRedisClientAsync + //public async Task Can_GeoRadiusByMember() + //{ + // await redis.FlushDbAsync(); + // await redis.AddGeoMembersAsync("Sicily", new[] { + // new RedisGeo(13.583333, 37.316667, "Agrigento"), + // new RedisGeo(13.361389, 38.115556, "Palermo"), + // new RedisGeo(15.087269, 37.502669, "Catania") + // }); + + // var results = await redis.GeoRadiusByMemberAsync("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers); + + // Assert.That(results.Count, Is.EqualTo(2)); + // Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + // Assert.That(results[0].Unit, Is.Null); + // Assert.That(results[1].Member, Is.EqualTo("Palermo")); + // Assert.That(results[1].Unit, Is.Null); + //} + + [Test] + public async Task Can_FindGeoResultsInRadius() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.FindGeoResultsInRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479447370796909)); + } + + [Test] + public async Task Can_FindGeoResultsInRadius_by_Member() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.FindGeoResultsInRadiusAsync("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.583333).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.316667).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(0)); + Assert.That(results[0].Hash, Is.EqualTo(3479030013248308)); + + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[1].Distance, 
Is.EqualTo(90.9778).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479099956230698)); + } + + [Test] + public async Task Can_GeoRadius_WithCoord_WithDist_WithHash_Count_and_Asc() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.FindGeoResultsInRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + count: 1, sortByNearest: false); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + results = await redis.FindGeoResultsInRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + count: 1, sortByNearest: true); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Catania")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479447370796909)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisGeoTests.cs b/tests/ServiceStack.Redis.Tests/RedisGeoTests.cs new file mode 100644 index 00000000..32248658 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisGeoTests.cs @@ -0,0 +1,229 @@ +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + [Ignore("CI requires redis-server v3.2.0")] + public class RedisGeoTests + { + private readonly RedisClient redis; + + public RedisGeoTests() + { + redis = new RedisClient(TestConfig.GeoHost); + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { + redis.Dispose(); + } + + [Test] + public void Can_AddGeoMember_and_GetGeoCoordinates() + { + redis.FlushDb(); + var count = redis.AddGeoMember("Sicily", 13.361389, 38.115556, "Palermo"); + Assert.That(count, Is.EqualTo(1)); + var results = redis.GetGeoCoordinates("Sicily", "Palermo"); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + } + + [Test] + public void GetGeoCoordinates_on_NonExistingMember_returns_no_results() + { + redis.FlushDb(); + var count = redis.AddGeoMember("Sicily", 13.361389, 38.115556, "Palermo"); + var results = redis.GetGeoCoordinates("Sicily", "NonExistingMember"); + Assert.That(results.Count, Is.EqualTo(0)); + + results = redis.GetGeoCoordinates("Sicily", "Palermo", "NonExistingMember"); + Assert.That(results.Count, Is.EqualTo(1)); + } + + [Test] + public void Can_AddGeoMembers_and_GetGeoCoordinates_multiple() + { + redis.FlushDb(); + var count = redis.AddGeoMembers("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + Assert.That(count, Is.EqualTo(2)); + + var results = redis.GetGeoCoordinates("Sicily", "Palermo", "Catania"); + + Assert.That(results.Count, 
Is.EqualTo(2)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Member, Is.EqualTo("Catania")); + } + + [Test] + public void Can_CalculateDistanceBetweenGeoMembers() + { + redis.FlushDb(); + redis.AddGeoMembers("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var distance = redis.CalculateDistanceBetweenGeoMembers("Sicily", "Palermo", "Catania"); + Assert.That(distance, Is.EqualTo(166274.15156960039).Within(.1)); + } + + [Test] + public void CalculateDistanceBetweenGeoMembers_on_NonExistingMember_returns_NaN() + { + redis.FlushDb(); + redis.AddGeoMembers("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var distance = redis.CalculateDistanceBetweenGeoMembers("Sicily", "Palermo", "NonExistingMember"); + Assert.That(distance, Is.EqualTo(double.NaN)); + } + + [Test] + public void Can_GetGeohashes() + { + redis.FlushDb(); + redis.AddGeoMembers("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var hashes = redis.GetGeohashes("Sicily", "Palermo", "Catania"); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.EqualTo("sqdtr74hyu0")); + + hashes = redis.GetGeohashes("Sicily", "Palermo", "NonExistingMember", "Catania"); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.Null); + Assert.That(hashes[2], Is.EqualTo("sqdtr74hyu0")); + } + + [Test] + public void Can_FindGeoMembersInRadius() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.FindGeoMembersInRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers); + + Assert.That(results.Length, Is.EqualTo(2)); + Assert.That(results[0], Is.EqualTo("Palermo")); + Assert.That(results[1], Is.EqualTo("Catania")); + } + + [Test] + public void Can_GeoRadiusByMember() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.GeoRadiusByMember("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.Null); + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.Null); + } + + [Test] + public void Can_FindGeoResultsInRadius() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.FindGeoResultsInRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + 
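+ // Hash is the raw 52-bit geohash integer that Redis uses as the sorted-set score for each member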
Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479447370796909)); + } + + [Test] + public void Can_FindGeoResultsInRadius_by_Member() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.FindGeoResultsInRadius("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.583333).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.316667).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(0)); + Assert.That(results[0].Hash, Is.EqualTo(3479030013248308)); + + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(90.9778).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479099956230698)); + } + + [Test] + public void Can_GeoRadius_WithCoord_WithDist_WithHash_Count_and_Asc() + { + redis.FlushDb(); + redis.GeoAdd("Sicily", + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania")); + + var results = redis.FindGeoResultsInRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + count:1, sortByNearest:false); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + results = redis.FindGeoResultsInRadius("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + count: 1, sortByNearest:true); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Catania")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479447370796909)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.Async.cs new file mode 100644 index 00000000..31748b6e --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.Async.cs @@ -0,0 +1,36 @@ +using NUnit.Framework; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Ignore("Integration"), Category("Async")] + public class RedisHyperLogTestsAsync + { 
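+ // Covers HyperLogLog add/count/merge; the exact counts asserted below (4 and 6) hold because the element sets are tiny, even though HyperLogLog is an approximate cardinality estimator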
+ const string Host = "localhost"; // "10.0.0.14" + private IRedisClientAsync Connect() => new RedisClient(Host); + + [Test] + public async Task Can_Add_to_Hyperlog() + { + await using var redis = Connect(); + + await redis.FlushAllAsync(); + + await redis.AddToHyperLogAsync("hyperlog", new[] { "a", "b", "c" }); + await redis.AddToHyperLogAsync("hyperlog", new[] { "c", "d" }); + + var count = await redis.CountHyperLogAsync("hyperlog"); + + Assert.That(count, Is.EqualTo(4)); + + await redis.AddToHyperLogAsync("hyperlog2", new[] { "c", "d", "e", "f" }); + + await redis.MergeHyperLogsAsync("hypermerge", new[] { "hyperlog", "hyperlog2" }); + + var mergeCount = await redis.CountHyperLogAsync("hypermerge"); + + Assert.That(mergeCount, Is.EqualTo(6)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.cs b/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.cs new file mode 100644 index 00000000..7a6af4d6 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.cs @@ -0,0 +1,48 @@ +using System; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Ignore("Integration")] + public class RedisHyperLogTests + { + [Test] + public void Can_Add_to_Hyperlog() + { + var redis = new RedisClient("10.0.0.14"); + redis.FlushAll(); + + redis.AddToHyperLog("hyperlog", "a", "b", "c"); + redis.AddToHyperLog("hyperlog", "c", "d"); + + var count = redis.CountHyperLog("hyperlog"); + + Assert.That(count, Is.EqualTo(4)); + + redis.AddToHyperLog("hyperlog2", "c", "d", "e", "f"); + + redis.MergeHyperLogs("hypermerge", "hyperlog", "hyperlog2"); + + var mergeCount = redis.CountHyperLog("hypermerge"); + + Assert.That(mergeCount, Is.EqualTo(6)); + } + + [Test] + public void Test_on_old_redisserver() + { + var redis = new RedisClient("10.0.0.14"); + //var redis = new RedisClient(); + redis.FlushAll(); + + //redis.ExpireEntryIn("key", TimeSpan.FromDays(14)); + + redis.Set("key", "value", TimeSpan.FromDays(14)); + + var value = redis.Get("key"); + + value.FromUtf8Bytes().Print(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisManagerPoolTests.cs b/tests/ServiceStack.Redis.Tests/RedisManagerPoolTests.cs new file mode 100644 index 00000000..a32f69aa --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisManagerPoolTests.cs @@ -0,0 +1,246 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Threading; +using NUnit.Framework; +using ServiceStack.Text; +#if NETCORE +using System.Threading.Tasks; +#endif + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisManagerPoolTests + { + readonly string[] hosts = new[] { + "readwrite1", "readwrite2:6000", "192.168.0.1", "localhost" + }; + + readonly string[] testReadOnlyHosts = new[] { + "read1", "read2:7000", "127.0.0.1" + }; + + private string firstReadWriteHost; + private string firstReadOnlyHost; + + [OneTimeSetUp] + public void OneTimeSetUp() + { + RedisConfig.VerifyMasterConnections = false; + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { + RedisConfig.VerifyMasterConnections = true; + } + + [SetUp] + public void OnBeforeEachTest() + { + firstReadWriteHost = hosts[0]; + firstReadOnlyHost = testReadOnlyHosts[0]; + } + + public RedisManagerPool CreateManager() + { + return new RedisManagerPool(hosts); + } + + [Test] + public void Can_change_db_for_client() + { + using (var db1 = new RedisManagerPool(TestConfig.SingleHost + 
"?db=1")) + using (var db2 = new RedisManagerPool(TestConfig.SingleHost + "?db=2")) + { + var val = Environment.TickCount; + var key = "test" + val; + var db1c = db1.GetClient(); + var db2c = db2.GetClient(); + try + { + db1c.Set(key, val); + Assert.That(db2c.Get(key), Is.EqualTo(0)); + Assert.That(db1c.Get(key), Is.EqualTo(val)); + } + finally + { + db1c.Remove(key); + } + } + } + + [Test] + public void Can_get_ReadWrite_client() + { + using (var manager = CreateManager()) + { + var client = manager.GetClient(); + + AssertClientHasHost(client, firstReadWriteHost); + } + } + + private static void AssertClientHasHost(IRedisClient client, string hostWithOptionalPort) + { + var parts = hostWithOptionalPort.Split(':'); + var port = parts.Length > 1 ? int.Parse(parts[1]) : RedisConfig.DefaultPort; + + Assert.That(client.Host, Is.EqualTo(parts[0])); + Assert.That(client.Port, Is.EqualTo(port)); + } + + + [Test] + public void Does_loop_through_ReadWrite_hosts() + { + using (var manager = CreateManager()) + { + var client1 = manager.GetClient(); + client1.Dispose(); + var client2 = manager.GetClient(); + var client3 = manager.GetClient(); + var client4 = manager.GetClient(); + var client5 = manager.GetClient(); + + AssertClientHasHost(client1, hosts[0]); + AssertClientHasHost(client2, hosts[1]); + AssertClientHasHost(client3, hosts[2]); + AssertClientHasHost(client4, hosts[3]); + AssertClientHasHost(client5, hosts[0]); + } + } + + [Test] + public void Can_have_different_pool_size_and_host_configurations() + { + var writeHosts = new[] { "readwrite1" }; + + using (var manager = new RedisManagerPool( + writeHosts, + new RedisPoolConfig { MaxPoolSize = 4 })) + { + //A poolsize of 4 will not block getting 4 clients + using (var client1 = manager.GetClient()) + using (var client2 = manager.GetClient()) + using (var client3 = manager.GetClient()) + using (var client4 = manager.GetClient()) + { + AssertClientHasHost(client1, writeHosts[0]); + AssertClientHasHost(client2, writeHosts[0]); + AssertClientHasHost(client3, writeHosts[0]); + AssertClientHasHost(client4, writeHosts[0]); + } + } + } + + [Test] + public void Does_not_block_ReadWrite_clients_pool() + { + using (var manager = new RedisManagerPool( + hosts, + new RedisPoolConfig { MaxPoolSize = 4 })) + { + var delay = TimeSpan.FromSeconds(1); + var client1 = manager.GetClient(); + var client2 = manager.GetClient(); + var client3 = manager.GetClient(); + var client4 = manager.GetClient(); + + Assert.That(((RedisClient)client1).IsManagedClient, Is.True); + Assert.That(((RedisClient)client2).IsManagedClient, Is.True); + Assert.That(((RedisClient)client3).IsManagedClient, Is.True); + Assert.That(((RedisClient)client4).IsManagedClient, Is.True); + + Action func = delegate + { + Thread.Sleep(delay + TimeSpan.FromSeconds(0.5)); + client4.Dispose(); + }; +#if NETCORE + Task.Run(func); +#else + func.BeginInvoke(null, null); +#endif + var start = DateTime.Now; + + var client5 = manager.GetClient(); + + Assert.That(((RedisClient)client5).IsManagedClient, Is.False); //outside of pool + + Assert.That(DateTime.Now - start, Is.LessThan(delay)); + + AssertClientHasHost(client1, hosts[0]); + AssertClientHasHost(client2, hosts[1]); + AssertClientHasHost(client3, hosts[2]); + AssertClientHasHost(client4, hosts[3]); + AssertClientHasHost(client5, hosts[0]); + } + } + + [Test] + public void Can_support_64_threads_using_the_client_simultaneously() + { + const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + var clientUsageMap = new Dictionary(); + +#if 
NETCORE + List<Task> tasks = new List<Task>(); +#else + var clientAsyncResults = new List<IAsyncResult>(); +#endif + using (var manager = CreateManager()) + { + for (var i = 0; i < noOfConcurrentClients; i++) + { + var clientNo = i; + var action = (Action)(() => UseClient(manager, clientNo, clientUsageMap)); +#if NETCORE + tasks.Add(Task.Run(action)); +#else + clientAsyncResults.Add(action.BeginInvoke(null, null)); +#endif + } + } + +#if NETCORE + Task.WaitAll(tasks.ToArray()); +#else + WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); +#endif + + Debug.WriteLine(TypeSerializer.SerializeToString(clientUsageMap)); + + var hostCount = 0; + foreach (var entry in clientUsageMap) + { + Assert.That(entry.Value, Is.GreaterThanOrEqualTo(5), "Host has unproportionate distribution: " + entry.Value); + Assert.That(entry.Value, Is.LessThanOrEqualTo(30), "Host has unproportionate distribution: " + entry.Value); + hostCount += entry.Value; + } + + Assert.That(hostCount, Is.EqualTo(noOfConcurrentClients), "Invalid no of clients used"); + } + + private static void UseClient(IRedisClientsManager manager, int clientNo, Dictionary<string, int> hostCountMap) + { + using (var client = manager.GetClient()) + { + lock (hostCountMap) + { + int hostCount; + if (!hostCountMap.TryGetValue(client.Host, out hostCount)) + { + hostCount = 0; + } + + hostCountMap[client.Host] = ++hostCount; + } + + Debug.WriteLine(String.Format("Client '{0}' is using '{1}'", clientNo, client.Host)); + } + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisMqHostPoolTests.cs b/tests/ServiceStack.Redis.Tests/RedisMqHostPoolTests.cs deleted file mode 100644 index 6e0e7c6e..00000000 --- a/tests/ServiceStack.Redis.Tests/RedisMqHostPoolTests.cs +++ /dev/null @@ -1,319 +0,0 @@ -using System; -using System.Diagnostics; -using System.Linq; -using System.Threading; -using NUnit.Framework; -using ServiceStack.Common; -using ServiceStack.Logging; -using ServiceStack.Logging.Support.Logging; -using ServiceStack.Messaging; -using ServiceStack.Redis.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Tests -{ - [Ignore("Will be removed")] - [TestFixture] - [Obsolete("Will be removed")] - public class RedisMqHostPoolTests - { - public class Reverse - { - public string Value { get; set; } - } - - public class Rot13 - { - public string Value { get; set; } - } - - [TestFixtureSetUp] - public void TestFixtureSetUp() - { - LogManager.LogFactory = new ConsoleLogFactory(); - } - - private static RedisMqHostPool CreateMqHostPool(int threadCount = 1) - { - var redisFactory = new BasicRedisClientManager(); - try - { - redisFactory.Exec(redis => redis.FlushAll()); - } - catch (RedisException rex) - { - Debug.WriteLine("WARNING: Redis not started? 
\n" + rex.Message); - } - var mqHost = new RedisMqHostPool(redisFactory) - { - NoOfThreadsPerService = threadCount, - }; - return mqHost; - } - - private static void Publish_4_messages(IMessageQueueClient mqClient) - { - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Reverse { Value = "ServiceStack" }); - mqClient.Publish(new Reverse { Value = "Redis" }); - } - - private static void Publish_4_Rot13_messages(IMessageQueueClient mqClient) - { - mqClient.Publish(new Rot13 { Value = "Hello" }); - mqClient.Publish(new Rot13 { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - mqClient.Publish(new Rot13 { Value = "Redis" }); - } - - [Test] - public void Utils_publish_Reverse_messages() - { - var mqHost = new RedisMqHost(new BasicRedisClientManager(), 2, null); - var mqClient = mqHost.CreateMessageQueueClient(); - Publish_4_messages(mqClient); - } - - [Test] - public void Utils_publish_Rot13_messages() - { - var mqHost = new RedisMqHost(new BasicRedisClientManager(), 2, null); - var mqClient = mqHost.CreateMessageQueueClient(); - Publish_4_Rot13_messages(mqClient); - } - - [Test] - public void Does_process_messages_sent_before_it_was_started() - { - var reverseCalled = 0; - - var mqHost = CreateMqHostPool(); - mqHost.RegisterHandler(x => { reverseCalled++; return x.GetBody().Value.Reverse(); }); - - var mqClient = mqHost.CreateMessageQueueClient(); - Publish_4_messages(mqClient); - - mqHost.Start(); - Thread.Sleep(3000); - - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(4)); - Assert.That(reverseCalled, Is.EqualTo(4)); - - mqHost.Dispose(); - } - - [Test] - public void Does_process_all_messages_and_Starts_Stops_correctly_with_multiple_threads_racing() - { - var mqHost = CreateMqHostPool(); - - var reverseCalled = 0; - var rot13Called = 0; - - mqHost.RegisterHandler(x => { reverseCalled++; return x.GetBody().Value.Reverse(); }); - mqHost.RegisterHandler(x => { rot13Called++; return x.GetBody().Value.ToRot13(); }); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - - mqHost.Start(); - Thread.Sleep(3000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(3)); - - mqClient.Publish(new Reverse { Value = "Foo" }); - mqClient.Publish(new Rot13 { Value = "Bar" }); - - 10.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Start())); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - - 5.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Stop())); - Thread.Sleep(10000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Stopped")); - - 10.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Start())); - Thread.Sleep(3000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - - Debug.WriteLine("\n" + mqHost.GetStats()); - - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(5)); - Assert.That(reverseCalled, Is.EqualTo(3)); - Assert.That(rot13Called, Is.EqualTo(2)); - - mqHost.Dispose(); - } - - [Test] - public void Only_allows_1_BgThread_to_run_at_a_time() - { - var mqHost = CreateMqHostPool(); - - mqHost.RegisterHandler(x => x.GetBody().Value.Reverse()); - mqHost.RegisterHandler(x => x.GetBody().Value.ToRot13()); - - 5.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Start())); - Thread.Sleep(1000); - 
Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - Assert.That(mqHost.BgThreadCount, Is.EqualTo(1)); - - 10.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Stop())); - Thread.Sleep(1000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Stopped")); - - ThreadPool.QueueUserWorkItem(y => mqHost.Start()); - Thread.Sleep(1000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - - Assert.That(mqHost.BgThreadCount, Is.EqualTo(2)); - - Debug.WriteLine(mqHost.GetStats()); - - mqHost.Dispose(); - } - - [Test, Ignore] - public void Cannot_Start_a_Disposed_MqHost() - { - var mqHost = CreateMqHostPool(); - - mqHost.RegisterHandler(x => x.GetBody().Value.Reverse()); - mqHost.Dispose(); - - try - { - mqHost.Start(); - Assert.Fail("Should throw ObjectDisposedException"); - } - catch (ObjectDisposedException) { } - } - - [Test, Ignore] - public void Cannot_Stop_a_Disposed_MqHost() - { - var mqHost = CreateMqHostPool(); - - mqHost.RegisterHandler(x => x.GetBody().Value.Reverse()); - mqHost.Start(); - Thread.Sleep(1000); - - mqHost.Dispose(); - - try - { - mqHost.Stop(); - Assert.Fail("Should throw ObjectDisposedException"); - } - catch (ObjectDisposedException) { } - } - - public class AlwaysThrows - { - public string Value { get; set; } - } - - [Test] - public void Does_retry_messages_with_errors_by_RetryCount() - { - var retryCount = 3; - var totalRetries = 1 + retryCount; //in total, inc. first try - - var mqHost = CreateMqHostPool(retryCount); - - var reverseCalled = 0; - var rot13Called = 0; - - mqHost.RegisterHandler(x => { reverseCalled++; return x.GetBody().Value.Reverse(); }); - mqHost.RegisterHandler(x => { rot13Called++; return x.GetBody().Value.ToRot13(); }); - mqHost.RegisterHandler(x => { throw new Exception("Always Throwing! " + x.GetBody().Value); }); - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new AlwaysThrows { Value = "1st" }); - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - - Thread.Sleep(3000); - Assert.That(mqHost.GetStats().TotalMessagesFailed, Is.EqualTo(1 * totalRetries)); - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(2 + 1)); - - 5.Times(x => mqClient.Publish(new AlwaysThrows { Value = "#" + x })); - - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - - Thread.Sleep(5000); - - Debug.WriteLine(mqHost.GetStatsDescription()); - - Assert.That(mqHost.GetStats().TotalMessagesFailed, Is.EqualTo((1 + 5) * totalRetries)); - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(6)); - - Assert.That(reverseCalled, Is.EqualTo(2 + 2)); - Assert.That(rot13Called, Is.EqualTo(1 + 1)); - } - - public class Incr - { - public int Value { get; set; } - } - - [Test] - public void Can_receive_and_process_same_reply_responses() - { - var mqHost = CreateMqHostPool(); - var called = 0; - - mqHost.RegisterHandler(m => { - Debug.WriteLine("In Incr #" + m.GetBody().Value); - called++; - return m.GetBody().Value > 0 ? 
new Incr { Value = m.GetBody().Value - 1 } : null; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - - var incr = new Incr { Value = 5 }; - mqClient.Publish(incr); - - Thread.Sleep(1000); - - Assert.That(called, Is.EqualTo(1 + incr.Value)); - } - - public class Hello { public string Name { get; set; } } - public class HelloResponse { public string Result { get; set; } } - - [Test] - public void Can_receive_and_process_standard_request_reply_combo() - { - var mqHost = CreateMqHostPool(); - - string messageReceived = null; - - mqHost.RegisterHandler(m => - new HelloResponse { Result = "Hello, " + m.GetBody().Name }); - - mqHost.RegisterHandler(m => { - messageReceived = m.GetBody().Result; return null; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - - var dto = new Hello { Name = "ServiceStack" }; - mqClient.Publish(dto); - - Thread.Sleep(1000); - - Assert.That(messageReceived, Is.EqualTo("Hello, ServiceStack")); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisMqHostSupportTests.cs b/tests/ServiceStack.Redis.Tests/RedisMqHostSupportTests.cs deleted file mode 100644 index 46edff99..00000000 --- a/tests/ServiceStack.Redis.Tests/RedisMqHostSupportTests.cs +++ /dev/null @@ -1,21 +0,0 @@ -using NUnit.Framework; -using ServiceStack.Messaging; -using ServiceStack.Messaging.Tests.Services; -using ServiceStack.Redis.Messaging; - -namespace ServiceStack.Redis.Tests -{ - [TestFixture, Category("Integration")] - public class RedisMqHostSupportTests - { - [Test] - public void Does_serialize_to_correct_MQ_name() - { - var message = new Message(new Greet {Name = "Test"}) {}; - - var mqClient = new RedisMessageQueueClient(new BasicRedisClientManager()); - - mqClient.Publish(message); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisMqHostTests.cs b/tests/ServiceStack.Redis.Tests/RedisMqHostTests.cs deleted file mode 100644 index 122ca3cb..00000000 --- a/tests/ServiceStack.Redis.Tests/RedisMqHostTests.cs +++ /dev/null @@ -1,331 +0,0 @@ -using System; -using System.Diagnostics; -using System.Linq; -using System.Threading; -using NUnit.Framework; -using ServiceStack.Common; -using ServiceStack.Logging; -using ServiceStack.Logging.Support.Logging; -using ServiceStack.Messaging; -using ServiceStack.Redis.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Tests -{ - [TestFixture, Category("Integration")] - public class RedisMqHostTests - { - private RedisMqHost mqHost; - - public class Reverse - { - public string Value { get; set; } - } - - public class Rot13 - { - public string Value { get; set; } - } - - [TestFixtureSetUp] - public void TestFixtureSetUp() - { - LogManager.LogFactory = new ConsoleLogFactory(); - } - - private RedisMqHost CreateMqHost() - { - return CreateMqHost(2); - } - - private static RedisMqHost CreateMqHost(int noOfRetries) - { - var redisFactory = new BasicRedisClientManager(); - try - { - redisFactory.Exec(redis => redis.FlushAll()); - } - catch (RedisException rex) - { - Debug.WriteLine("WARNING: Redis not started? 
\n" + rex.Message); - } - var mqHost = new RedisMqHost(redisFactory, noOfRetries, null); - return mqHost; - } - - private static void Publish_4_messages(IMessageQueueClient mqClient) - { - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Reverse { Value = "ServiceStack" }); - mqClient.Publish(new Reverse { Value = "Redis" }); - } - - private static void Publish_4_Rot13_messages(IMessageQueueClient mqClient) - { - mqClient.Publish(new Rot13 { Value = "Hello" }); - mqClient.Publish(new Rot13 { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - mqClient.Publish(new Rot13 { Value = "Redis" }); - } - - [Test] - public void Utils_publish_Reverse_messages() - { - mqHost = new RedisMqHost(new BasicRedisClientManager(), 2, null); - var mqClient = mqHost.CreateMessageQueueClient(); - Publish_4_messages(mqClient); - Thread.Sleep(500); - mqHost.Dispose(); - } - - [Test] - public void Utils_publish_Rot13_messages() - { - mqHost = new RedisMqHost(new BasicRedisClientManager(), 2, null); - var mqClient = mqHost.CreateMessageQueueClient(); - Publish_4_Rot13_messages(mqClient); - Thread.Sleep(500); - mqHost.Dispose(); - } - - [Test] - public void Does_process_messages_sent_before_it_was_started() - { - var reverseCalled = 0; - - mqHost = CreateMqHost(); - mqHost.RegisterHandler(x => { reverseCalled++; return x.GetBody().Value.Reverse(); }); - - var mqClient = mqHost.CreateMessageQueueClient(); - Publish_4_messages(mqClient); - - mqHost.Start(); - Thread.Sleep(3000); - - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(4)); - Assert.That(reverseCalled, Is.EqualTo(4)); - - mqHost.Dispose(); - } - - [Test, Ignore("Inconsistent behaviour")] - public void Does_process_all_messages_and_Starts_Stops_correctly_with_multiple_threads_racing() - { - mqHost = CreateMqHost(); - - var reverseCalled = 0; - var rot13Called = 0; - - mqHost.RegisterHandler(x => { reverseCalled++; return x.GetBody().Value.Reverse(); }); - mqHost.RegisterHandler(x => { rot13Called++; return x.GetBody().Value.ToRot13(); }); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - - mqHost.Start(); - Thread.Sleep(3000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(3)); - - mqClient.Publish(new Reverse { Value = "Foo" }); - mqClient.Publish(new Rot13 { Value = "Bar" }); - - 10.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Start())); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - - 5.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Stop())); - Thread.Sleep(3000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Stopped").Or.EqualTo("Stopping")); - - 10.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Start())); - Thread.Sleep(3000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - - Debug.WriteLine("\n" + mqHost.GetStats()); - - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(5)); - Assert.That(reverseCalled, Is.EqualTo(3)); - Assert.That(rot13Called, Is.EqualTo(2)); - - mqHost.Dispose(); - } - - [Test] - public void Only_allows_1_BgThread_to_run_at_a_time() - { - mqHost = CreateMqHost(); - - mqHost.RegisterHandler(x => x.GetBody().Value.Reverse()); - mqHost.RegisterHandler(x => x.GetBody().Value.ToRot13()); - - 5.Times(x => 
ThreadPool.QueueUserWorkItem(y => mqHost.Start())); - Thread.Sleep(1000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - Assert.That(mqHost.BgThreadCount, Is.EqualTo(1)); - - 10.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Stop())); - Thread.Sleep(1000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Stopped")); - - ThreadPool.QueueUserWorkItem(y => mqHost.Start()); - Thread.Sleep(1000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - - Assert.That(mqHost.BgThreadCount, Is.EqualTo(2)); - - Debug.WriteLine(mqHost.GetStats()); - - mqHost.Dispose(); - } - - [Test] - public void Cannot_Start_a_Disposed_MqHost() - { - mqHost = CreateMqHost(); - - mqHost.RegisterHandler(x => x.GetBody().Value.Reverse()); - mqHost.Dispose(); - - try - { - mqHost.Start(); - Assert.Fail("Should throw ObjectDisposedException"); - } - catch (ObjectDisposedException) { } - } - - [Test] - public void Cannot_Stop_a_Disposed_MqHost() - { - mqHost = CreateMqHost(); - - mqHost.RegisterHandler(x => x.GetBody().Value.Reverse()); - mqHost.Start(); - Thread.Sleep(5000); - - mqHost.Dispose(); - - try - { - mqHost.Stop(); - Assert.Fail("Should throw ObjectDisposedException"); - } - catch (ObjectDisposedException) { } - } - - public class AlwaysThrows - { - public string Value { get; set; } - } - - [Test] - public void Does_retry_messages_with_errors_by_RetryCount() - { - var retryCount = 3; - var totalRetries = 1 + retryCount; //in total, inc. first try - - mqHost = CreateMqHost(retryCount); - - var reverseCalled = 0; - var rot13Called = 0; - - mqHost.RegisterHandler(x => { reverseCalled++; return x.GetBody().Value.Reverse(); }); - mqHost.RegisterHandler(x => { rot13Called++; return x.GetBody().Value.ToRot13(); }); - mqHost.RegisterHandler(x => { throw new Exception("Always Throwing! " + x.GetBody().Value); }); - mqHost.Start(); - Thread.Sleep(3000); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - mqClient.Publish(new AlwaysThrows { Value = "1st" }); - - Thread.Sleep(3000); - - Assert.That(mqHost.GetStats().TotalMessagesFailed, Is.EqualTo(1 * totalRetries)); - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(2 + 1)); - - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - - 5.Times(x => mqClient.Publish(new AlwaysThrows { Value = "#" + x })); - - Thread.Sleep(10000); - - Debug.WriteLine(mqHost.GetStatsDescription()); - - Assert.That(mqHost.GetStats().TotalMessagesFailed, Is.EqualTo((1 + 5) * totalRetries)); - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(6)); - - Assert.That(reverseCalled, Is.EqualTo(2 + 2)); - Assert.That(rot13Called, Is.EqualTo(1 + 1)); - - mqHost.Dispose(); - } - - public class Incr - { - public int Value { get; set; } - } - - [Test, Ignore("Inconsistent behaviour")] - public void Can_receive_and_process_same_reply_responses() - { - mqHost = CreateMqHost(); - var called = 0; - - mqHost.RegisterHandler(m => { - Debug.WriteLine("In Incr #" + m.GetBody().Value); - called++; - return m.GetBody().Value > 0 ? 
new Incr { Value = m.GetBody().Value - 1 } : null; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - - var incr = new Incr { Value = 5 }; - mqClient.Publish(incr); - - Thread.Sleep(2000); - mqHost.Dispose(); - - Assert.That(called, Is.EqualTo(1 + incr.Value)); - } - - public class Hello { public string Name { get; set; } } - public class HelloResponse { public string Result { get; set; } } - - [Test] - public void Can_receive_and_process_standard_request_reply_combo() - { - mqHost = CreateMqHost(); - - string messageReceived = null; - - mqHost.RegisterHandler(m => - new HelloResponse { Result = "Hello, " + m.GetBody().Name }); - - mqHost.RegisterHandler(m => { - messageReceived = m.GetBody().Result; return null; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - - var dto = new Hello { Name = "ServiceStack" }; - mqClient.Publish(dto); - - Thread.Sleep(2000); - mqHost.Dispose(); - - Assert.That(messageReceived, Is.EqualTo("Hello, ServiceStack")); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisMqServerSleepServerTests.cs b/tests/ServiceStack.Redis.Tests/RedisMqServerSleepServerTests.cs deleted file mode 100644 index eec3cb6e..00000000 --- a/tests/ServiceStack.Redis.Tests/RedisMqServerSleepServerTests.cs +++ /dev/null @@ -1,113 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading; -using NUnit.Framework; -using ServiceStack.Redis.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Tests -{ - [TestFixture, Category("Integration")] - public class RedisMqServerSleepServerTests - { - public class Counters - { - public int Sleep0 { get; set; } - public int Sleep10 { get; set; } - public int Sleep100 { get; set; } - public int Sleep1000 { get; set; } - } - - class Sleep0 - { - public int Id { get; set; } - } - class Sleep10 - { - public int Id { get; set; } - } - class Sleep100 - { - public int Id { get; set; } - } - class Sleep1000 - { - public int Id { get; set; } - } - - readonly Counters counter = new Counters(); - - RedisMqServer CreateServer() - { - using (var redis = new RedisClient()) - redis.FlushAll(); - - var mqServer = new RedisMqServer(new BasicRedisClientManager()); - mqServer.RegisterHandler(m => new Sleep0 { Id = counter.Sleep0++ }); - - mqServer.RegisterHandler(m => { - Thread.Sleep(10); - return new Sleep10 { Id = counter.Sleep10++ }; - }); - mqServer.RegisterHandler(m => { - Thread.Sleep(100); - return new Sleep100 { Id = counter.Sleep100++ }; - }); - mqServer.RegisterHandler(m => { - Thread.Sleep(1000); - return new Sleep1000 { Id = counter.Sleep1000++ }; - }); - - - return mqServer; - } - - [Test] - public void Run_for_1_seconds() - { - RunFor(TimeSpan.FromSeconds(1)); - } - - [Test] - public void Run_for_5_seconds() - { - RunFor(TimeSpan.FromSeconds(5)); - } - - [Test] - public void Run_for_10_seconds() - { - RunFor(TimeSpan.FromSeconds(10)); - } - - [Test] - public void Run_for_30_seconds() - { - RunFor(TimeSpan.FromSeconds(30)); - } - - private void RunFor(TimeSpan SleepFor) - { - var mqServer = CreateServer(); - - mqServer.Start(); - - using (var mqClient = mqServer.CreateMessageQueueClient()) - { - mqClient.Publish(new Sleep0()); - mqClient.Publish(new Sleep10()); - mqClient.Publish(new Sleep100()); - mqClient.Publish(new Sleep1000()); - } - - Thread.Sleep(SleepFor); - - Debug.WriteLine(counter.Dump()); - - Debug.WriteLine("Disposing..."); - mqServer.Dispose(); - - Debug.WriteLine(counter.Dump()); - } - } -} \ No newline at end of 
file diff --git a/tests/ServiceStack.Redis.Tests/RedisMqServerSpinServerTests.cs b/tests/ServiceStack.Redis.Tests/RedisMqServerSpinServerTests.cs deleted file mode 100644 index 0c99e8f3..00000000 --- a/tests/ServiceStack.Redis.Tests/RedisMqServerSpinServerTests.cs +++ /dev/null @@ -1,116 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading; -using NUnit.Framework; -using ServiceStack.Redis.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Tests -{ - [TestFixture, Category("Integration")] - public class RedisMqServerSpinServerTests - { - public class Counters - { - public int Spin0 { get; set; } - public int Spin10 { get; set; } - public int Spin100 { get; set; } - public int Spin1000 { get; set; } - } - - class Spin0 - { - public int Id { get; set; } - } - class Spin10 - { - public int Id { get; set; } - } - class Spin100 - { - public int Id { get; set; } - } - class Spin1000 - { - public int Id { get; set; } - } - - readonly Counters counter = new Counters(); - - RedisMqServer CreateServer() - { - using (var redis = new RedisClient()) - redis.FlushAll(); - - var mqServer = new RedisMqServer(new BasicRedisClientManager()); - mqServer.RegisterHandler(m => new Spin0 { Id = counter.Spin0++ }); - - mqServer.RegisterHandler(m => { - var sw = Stopwatch.StartNew(); - while (sw.ElapsedMilliseconds < 10) Thread.SpinWait(100000); - return new Spin10 { Id = counter.Spin10++ }; - }); - mqServer.RegisterHandler(m => { - var sw = Stopwatch.StartNew(); - while (sw.ElapsedMilliseconds < 100) Thread.SpinWait(100000); - return new Spin100 { Id = counter.Spin100++ }; - }); - mqServer.RegisterHandler(m => { - var sw = Stopwatch.StartNew(); - while (sw.ElapsedMilliseconds < 1000) Thread.SpinWait(100000); - return new Spin1000 { Id = counter.Spin1000++ }; - }); - - - return mqServer; - } - - [Test] - public void Run_for_1_seconds() - { - RunFor(TimeSpan.FromSeconds(1)); - } - - [Test] - public void Run_for_5_seconds() - { - RunFor(TimeSpan.FromSeconds(5)); - } - - [Test] - public void Run_for_10_seconds() - { - RunFor(TimeSpan.FromSeconds(10)); - } - - [Test] - public void Run_for_30_seconds() - { - RunFor(TimeSpan.FromSeconds(30)); - } - - private void RunFor(TimeSpan SpinFor) - { - var mqServer = CreateServer(); - - mqServer.Start(); - - using (var mqClient = mqServer.CreateMessageQueueClient()) - { - mqClient.Publish(new Spin0()); - mqClient.Publish(new Spin10()); - mqClient.Publish(new Spin100()); - mqClient.Publish(new Spin1000()); - } - - Thread.Sleep(SpinFor); - - Debug.WriteLine(counter.Dump()); - - Debug.WriteLine("Disposing..."); - mqServer.Dispose(); - - Debug.WriteLine(counter.Dump()); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisMqServerTests.cs b/tests/ServiceStack.Redis.Tests/RedisMqServerTests.cs deleted file mode 100644 index 22d5cab4..00000000 --- a/tests/ServiceStack.Redis.Tests/RedisMqServerTests.cs +++ /dev/null @@ -1,394 +0,0 @@ -using System; -using System.Diagnostics; -using System.Linq; -using System.Threading; -using NUnit.Framework; -using ServiceStack.Common; -using ServiceStack.Logging; -using ServiceStack.Logging.Support.Logging; -using ServiceStack.Messaging; -using ServiceStack.Redis.Messaging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Tests -{ - [TestFixture, Category("Integration")] - public class RedisMqServerTests - { - public class Reverse - { - public string Value { get; set; } - } - - public class Rot13 - { - public string Value { get; set; } - } - - [TestFixtureSetUp] - 
public void TestFixtureSetUp() - { - - LogManager.LogFactory = new ConsoleLogFactory(); - } - - private static RedisMqServer CreateMqServer(int noOfRetries = 2) - { - var redisFactory = new BasicRedisClientManager(); - try - { - redisFactory.Exec(redis => redis.FlushAll()); - } - catch (RedisException rex) - { - Debug.WriteLine("WARNING: Redis not started? \n" + rex.Message); - } - var mqHost = new RedisMqServer(redisFactory, noOfRetries); - return mqHost; - } - - private static void Publish_4_messages(IMessageQueueClient mqClient) - { - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Reverse { Value = "ServiceStack" }); - mqClient.Publish(new Reverse { Value = "Redis" }); - } - - private static void Publish_4_Rot13_messages(IMessageQueueClient mqClient) - { - mqClient.Publish(new Rot13 { Value = "Hello" }); - mqClient.Publish(new Rot13 { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - mqClient.Publish(new Rot13 { Value = "Redis" }); - } - - [Test] - public void Utils_publish_Reverse_messages() - { - var mqHost = new RedisMqHost(new BasicRedisClientManager(), 2); - var mqClient = mqHost.CreateMessageQueueClient(); - Publish_4_messages(mqClient); - mqHost.Stop(); - } - - [Test] - public void Utils_publish_Rot13_messages() - { - var mqHost = new RedisMqHost(new BasicRedisClientManager(), 2); - var mqClient = mqHost.CreateMessageQueueClient(); - Publish_4_Rot13_messages(mqClient); - mqHost.Stop(); - } - - [Test] - public void Does_process_messages_sent_before_it_was_started() - { - var reverseCalled = 0; - - var mqHost = CreateMqServer(); - mqHost.RegisterHandler(x => { reverseCalled++; return x.GetBody().Value.Reverse(); }); - - var mqClient = mqHost.CreateMessageQueueClient(); - Publish_4_messages(mqClient); - - mqHost.Start(); - Thread.Sleep(3000); - - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(4)); - Assert.That(reverseCalled, Is.EqualTo(4)); - - mqHost.Dispose(); - } - - [Test] - public void Does_process_all_messages_and_Starts_Stops_correctly_with_multiple_threads_racing() - { - var mqHost = CreateMqServer(); - - var reverseCalled = 0; - var rot13Called = 0; - - mqHost.RegisterHandler(x => { reverseCalled++; return x.GetBody().Value.Reverse(); }); - mqHost.RegisterHandler(x => { rot13Called++; return x.GetBody().Value.ToRot13(); }); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - - mqHost.Start(); - Thread.Sleep(3000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(3)); - - mqClient.Publish(new Reverse { Value = "Foo" }); - mqClient.Publish(new Rot13 { Value = "Bar" }); - - 10.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Start())); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - - 5.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Stop())); - Thread.Sleep(1000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Stopped")); - - 10.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Start())); - Thread.Sleep(3000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - - Debug.WriteLine("\n" + mqHost.GetStats()); - - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(5)); - Assert.That(reverseCalled, Is.EqualTo(3)); - Assert.That(rot13Called, Is.EqualTo(2)); - - 
mqHost.Dispose(); - } - - [Test] - public void Only_allows_1_BgThread_to_run_at_a_time() - { - var mqHost = CreateMqServer(); - - mqHost.RegisterHandler(x => x.GetBody().Value.Reverse()); - mqHost.RegisterHandler(x => x.GetBody().Value.ToRot13()); - - 5.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Start())); - Thread.Sleep(1000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - Assert.That(mqHost.BgThreadCount, Is.EqualTo(1)); - - 10.Times(x => ThreadPool.QueueUserWorkItem(y => mqHost.Stop())); - Thread.Sleep(1000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Stopped")); - - ThreadPool.QueueUserWorkItem(y => mqHost.Start()); - Thread.Sleep(1000); - Assert.That(mqHost.GetStatus(), Is.EqualTo("Started")); - - Assert.That(mqHost.BgThreadCount, Is.EqualTo(2)); - - Debug.WriteLine(mqHost.GetStats()); - - mqHost.Dispose(); - } - - [Test] - public void Cannot_Start_a_Disposed_MqHost() - { - var mqHost = CreateMqServer(); - - mqHost.RegisterHandler(x => x.GetBody().Value.Reverse()); - mqHost.Dispose(); - - try - { - mqHost.Start(); - Assert.Fail("Should throw ObjectDisposedException"); - } - catch (ObjectDisposedException) { } - } - - [Test] - public void Cannot_Stop_a_Disposed_MqHost() - { - var mqHost = CreateMqServer(); - - mqHost.RegisterHandler(x => x.GetBody().Value.Reverse()); - mqHost.Start(); - Thread.Sleep(1000); - - mqHost.Dispose(); - - try - { - mqHost.Stop(); - Assert.Fail("Should throw ObjectDisposedException"); - } - catch (ObjectDisposedException) { } - } - - public class AlwaysThrows - { - public string Value { get; set; } - } - - [Test] - public void Does_retry_messages_with_errors_by_RetryCount() - { - var retryCount = 3; - var totalRetries = 1 + retryCount; //in total, inc. first try - - var mqHost = CreateMqServer(retryCount); - - var reverseCalled = 0; - var rot13Called = 0; - - mqHost.RegisterHandler(x => { reverseCalled++; return x.GetBody().Value.Reverse(); }); - mqHost.RegisterHandler(x => { rot13Called++; return x.GetBody().Value.ToRot13(); }); - mqHost.RegisterHandler(x => { throw new Exception("Always Throwing! " + x.GetBody().Value); }); - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - mqClient.Publish(new AlwaysThrows { Value = "1st" }); - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - - Thread.Sleep(3000); - Assert.That(mqHost.GetStats().TotalMessagesFailed, Is.EqualTo(1 * totalRetries)); - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(2 + 1)); - - 5.Times(x => mqClient.Publish(new AlwaysThrows { Value = "#" + x })); - - mqClient.Publish(new Reverse { Value = "Hello" }); - mqClient.Publish(new Reverse { Value = "World" }); - mqClient.Publish(new Rot13 { Value = "ServiceStack" }); - - Thread.Sleep(5000); - - Debug.WriteLine(mqHost.GetStatsDescription()); - - Assert.That(mqHost.GetStats().TotalMessagesFailed, Is.EqualTo((1 + 5) * totalRetries)); - Assert.That(mqHost.GetStats().TotalMessagesProcessed, Is.EqualTo(6)); - - Assert.That(reverseCalled, Is.EqualTo(2 + 2)); - Assert.That(rot13Called, Is.EqualTo(1 + 1)); - } - - public class Incr - { - public int Value { get; set; } - } - - [Test] - public void Can_receive_and_process_same_reply_responses() - { - var mqHost = CreateMqServer(); - var called = 0; - - mqHost.RegisterHandler(m => { - Debug.WriteLine("In Incr #" + m.GetBody().Value); - called++; - return m.GetBody().Value > 0 ? 
new Incr { Value = m.GetBody().Value - 1 } : null; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - - var incr = new Incr { Value = 5 }; - mqClient.Publish(incr); - - Thread.Sleep(1000); - - Assert.That(called, Is.EqualTo(1 + incr.Value)); - } - - public class Hello { public string Name { get; set; } } - public class HelloResponse { public string Result { get; set; } } - - [Test] - public void Can_receive_and_process_standard_request_reply_combo() - { - var mqHost = CreateMqServer(); - - string messageReceived = null; - - mqHost.RegisterHandler(m => - new HelloResponse { Result = "Hello, " + m.GetBody().Name }); - - mqHost.RegisterHandler(m => { - messageReceived = m.GetBody().Result; return null; - }); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - - var dto = new Hello { Name = "ServiceStack" }; - mqClient.Publish(dto); - - Thread.Sleep(1000); - - Assert.That(messageReceived, Is.EqualTo("Hello, ServiceStack")); - } - - [Test] - public void Can_BlockingPop_from_multiple_queues() - { - const int noOf = 5; - var queueNames = noOf.Times(x => "queue:" + x).ToArray(); - - ThreadPool.QueueUserWorkItem(state => { - Thread.Sleep(100); - var i = 0; - var client = RedisClient.New(); - foreach (var queueName in queueNames) - { - var msgName = "msg:" + i++; - Debug.WriteLine("SEND " + msgName); - client.PrependItemToList(queueName, msgName); - } - }); - - var server = RedisClient.New(); - noOf.Times(x => { - Debug.WriteLine("Blocking... " + x); - var result = server.BlockingDequeueItemFromLists(queueNames, TimeSpan.FromSeconds(3)); - Debug.WriteLine("RECV: " + result.Dump()); - }); - } - - public class Wait - { - public int ForMs { get; set; } - } - - [Test] - public void Can_handle_requests_concurrently_in_2_threads() - { - RunHandlerOnMultipleThreads(noOfThreads: 2, msgs: 10); - } - - [Test] - public void Can_handle_requests_concurrently_in_3_threads() - { - RunHandlerOnMultipleThreads(noOfThreads: 3, msgs: 10); - } - - [Test] - public void Can_handle_requests_concurrently_in_4_threads() - { - RunHandlerOnMultipleThreads(noOfThreads: 4, msgs: 10); - } - - private static void RunHandlerOnMultipleThreads(int noOfThreads, int msgs) - { - var timesCalled = 0; - var mqHost = CreateMqServer(); - mqHost.RegisterHandler(m => { - timesCalled++; - Thread.Sleep(m.GetBody().ForMs); - return null; - }, noOfThreads); - - mqHost.Start(); - - var mqClient = mqHost.CreateMessageQueueClient(); - - var dto = new Wait { ForMs = 100 }; - msgs.Times(i => mqClient.Publish(dto)); - - const double buffer = 1.1; - - var sleepForMs = (int)((msgs * 100 / (double)noOfThreads) * buffer); - "Sleeping for {0}ms...".Print(sleepForMs); - Thread.Sleep(sleepForMs); - - mqHost.Dispose(); - - Assert.That(timesCalled, Is.EqualTo(msgs)); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPasswordTests.cs b/tests/ServiceStack.Redis.Tests/RedisPasswordTests.cs new file mode 100644 index 00000000..db10e714 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPasswordTests.cs @@ -0,0 +1,53 @@ +using System; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisPasswordTests + { + [Ignore("Integration")] + [Test] + public void Can_connect_to_Replicas_and_Masters_with_Password() + { + var factory = new PooledRedisClientManager( + readWriteHosts: new[] {"pass@10.0.0.59:6379"}, + readOnlyHosts: new[] {"pass@10.0.0.59:6380"}); + + using var readWrite = factory.GetClient(); + using var readOnly = 
factory.GetReadOnlyClient(); + readWrite.SetValue("Foo", "Bar"); + var value = readOnly.GetValue("Foo"); + + Assert.That(value, Is.EqualTo("Bar")); + } + + [Test] + public void Passwords_are_not_leaked_in_exception_messages() + { + const string password = "yesterdayspassword"; + + Assert.Throws(() => { + try + { + var connString = password + "@" + TestConfig.SingleHost + "?RetryTimeout=2000"; + // redis will throw when using password and it's not configured + var factory = new PooledRedisClientManager(connString); + using var redis = factory.GetClient(); + redis.SetValue("Foo", "Bar"); + } + catch (RedisResponseException ex) + { + Assert.That(ex.Message, Is.Not.Contains(password)); + throw; + } + catch (TimeoutException tex) + { + Assert.That(tex.InnerException.Message, Is.Not.Contains(password)); + throw tex.InnerException; + } + }, + "Expected an exception after Redis AUTH command; try using a password that doesn't match."); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.Async.cs new file mode 100644 index 00000000..1f0e5e42 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.Async.cs @@ -0,0 +1,65 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration"), Category("Async")] + public class RedisPersistenceProviderTestsAsync + { + [Test] + public async Task Can_Store_and_GetById_ModelWithIdAndName() + { + await using IRedisClientAsync redis = new RedisClient(TestConfig.SingleHost); + const int modelId = 1; + var to = ModelWithIdAndName.Create(modelId); + await redis.StoreAsync(to); + + var from = await redis.GetByIdAsync(modelId); + + ModelWithIdAndName.AssertIsEqual(to, from); + } + + [Test] + public async Task Can_StoreAll_and_GetByIds_ModelWithIdAndName() + { + await using IRedisClientAsync redis = new RedisClient(TestConfig.SingleHost); + + var ids = new[] { 1, 2, 3, 4, 5 }; + var tos = ids.Map(ModelWithIdAndName.Create); + + await redis.StoreAllAsync(tos); + + var froms = await redis.GetByIdsAsync(ids); + var fromIds = froms.Map(x => x.Id); + + Assert.That(fromIds, Is.EquivalentTo(ids)); + } + + [Test] + public async Task Can_Delete_ModelWithIdAndName() + { + await using IRedisClientAsync redis = new RedisClient(TestConfig.SingleHost); + var ids = new List { 1, 2, 3, 4, 5 }; + var tos = ids.ConvertAll(ModelWithIdAndName.Create); + + await redis.StoreAllAsync(tos); + + var deleteIds = new List { 2, 4 }; + + await redis.DeleteByIdsAsync(deleteIds); + + var froms = await redis.GetByIdsAsync(ids); + var fromIds = froms.Map(x => x.Id); + + var expectedIds = ids.Where(x => !deleteIds.Contains(x)).ToList(); + + Assert.That(fromIds, Is.EquivalentTo(expectedIds)); + } + + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.cs b/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.cs index 0af86e25..6e0852a6 100644 --- a/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.cs @@ -1,69 +1,69 @@ using System.Collections.Generic; using System.Linq; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; using ServiceStack.Common.Tests.Models; namespace 
ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class RedisPersistenceProviderTests - { - [Test] - public void Can_Store_and_GetById_ModelWithIdAndName() - { - using (var redis = new RedisClient(TestConfig.SingleHost)) - { - const int modelId = 1; - var to = ModelWithIdAndName.Create(modelId); - redis.Store(to); + [TestFixture, Category("Integration")] + public class RedisPersistenceProviderTests + { + [Test] + public void Can_Store_and_GetById_ModelWithIdAndName() + { + using (var redis = new RedisClient(TestConfig.SingleHost)) + { + const int modelId = 1; + var to = ModelWithIdAndName.Create(modelId); + redis.Store(to); - var from = redis.GetById(modelId); + var from = redis.GetById(modelId); - ModelWithIdAndName.AssertIsEqual(to, from); - } - } + ModelWithIdAndName.AssertIsEqual(to, from); + } + } - [Test] - public void Can_StoreAll_and_GetByIds_ModelWithIdAndName() - { - using (var redis = new RedisClient(TestConfig.SingleHost)) - { - var ids = new[] { 1, 2, 3, 4, 5 }; - var tos = ids.ConvertAll(x => ModelWithIdAndName.Create(x)); + [Test] + public void Can_StoreAll_and_GetByIds_ModelWithIdAndName() + { + using (var redis = new RedisClient(TestConfig.SingleHost)) + { + var ids = new[] { 1, 2, 3, 4, 5 }; + var tos = ids.Map(ModelWithIdAndName.Create); - redis.StoreAll(tos); + redis.StoreAll(tos); - var froms = redis.GetByIds(ids); - var fromIds = froms.ConvertAll(x => x.Id); + var froms = redis.GetByIds(ids); + var fromIds = froms.Map(x => x.Id); - Assert.That(fromIds, Is.EquivalentTo(ids)); - } - } + Assert.That(fromIds, Is.EquivalentTo(ids)); + } + } - [Test] - public void Can_Delete_ModelWithIdAndName() - { - using (var redis = new RedisClient(TestConfig.SingleHost)) - { - var ids = new List { 1, 2, 3, 4, 5 }; - var tos = ids.ConvertAll(x => ModelWithIdAndName.Create(x)); + [Test] + public void Can_Delete_ModelWithIdAndName() + { + using (var redis = new RedisClient(TestConfig.SingleHost)) + { + var ids = new List { 1, 2, 3, 4, 5 }; + var tos = ids.ConvertAll(ModelWithIdAndName.Create); - redis.StoreAll(tos); + redis.StoreAll(tos); - var deleteIds = new List { 2, 4 }; + var deleteIds = new List { 2, 4 }; - redis.DeleteByIds(deleteIds); + redis.DeleteByIds(deleteIds); - var froms = redis.GetByIds(ids); - var fromIds = froms.ConvertAll(x => x.Id); + var froms = redis.GetByIds(ids); + var fromIds = froms.Map(x => x.Id); - var expectedIds = ids.Where(x => !deleteIds.Contains(x)).ToList(); + var expectedIds = ids.Where(x => !deleteIds.Contains(x)).ToList(); - Assert.That(fromIds, Is.EquivalentTo(expectedIds)); - } - } + Assert.That(fromIds, Is.EquivalentTo(expectedIds)); + } + } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.Async.cs new file mode 100644 index 00000000..4c0a707a --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.Async.cs @@ -0,0 +1,73 @@ +using NUnit.Framework; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisPipelineCommonTestsAsync + : RedisClientTestsBaseAsync + { + [Test] + public async Task Can_Set_and_Expire_key_in_atomic_transaction() + { + var oneSec = TimeSpan.FromSeconds(1); + + Assert.That(await RedisAsync.GetValueAsync("key"), Is.Null); + await using (var trans = RedisAsync.CreatePipeline()) //Calls 
'MULTI' + { + trans.QueueCommand(r => r.SetValueAsync("key", "a")); //Queues 'SET key a' + trans.QueueCommand(r => r.ExpireEntryInAsync("key", oneSec)); //Queues 'EXPIRE key 1' + + await trans.FlushAsync(); //Calls 'EXEC' + + } //Calls 'DISCARD' if 'EXEC' wasn't called + + Assert.That(await RedisAsync.GetValueAsync("key"), Is.EqualTo("a")); + await Task.Delay(TimeSpan.FromSeconds(2)); + Assert.That(await RedisAsync.GetValueAsync("key"), Is.Null); + } + + [Test] + public async Task Can_SetAll_and_Publish_in_atomic_transaction() + { + var messages = new Dictionary { { "a", "a" }, { "b", "b" } }; + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(c => c.SetAllAsync(messages.ToDictionary(t => t.Key, t => t.Value))); + pipeline.QueueCommand(c => c.PublishMessageAsync("uc", "b")); + + await pipeline.FlushAsync(); + } + + [Test] + public async Task Can_Pop_priority_message_from_SortedSet_and_Add_to_workq_in_atomic_transaction() + { + var messages = new List { "message4", "message3", "message2" }; + + await RedisAsync.AddItemToListAsync("workq", "message1"); + + var priority = 1; + await messages.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync("prioritymsgs", x, priority++)); + + var highestPriorityMessage = await RedisAsync.PopItemWithHighestScoreFromSortedSetAsync("prioritymsgs"); + + await using (var trans = RedisAsync.CreatePipeline()) + { + trans.QueueCommand(r => r.RemoveItemFromSortedSetAsync("prioritymsgs", highestPriorityMessage)); + trans.QueueCommand(r => r.AddItemToListAsync("workq", highestPriorityMessage)); + + await trans.FlushAsync(); + } + + Assert.That(await RedisAsync.GetAllItemsFromListAsync("workq"), + Is.EquivalentTo(new List { "message1", "message2" })); + Assert.That(await RedisAsync.GetAllItemsFromSortedSetAsync("prioritymsgs"), + Is.EquivalentTo(new List { "message3", "message4" })); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.cs b/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.cs index 1fb44697..b7994dc5 100644 --- a/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.cs @@ -8,29 +8,29 @@ namespace ServiceStack.Redis.Tests { - [TestFixture] - public class RedisPipelineCommonTests - : RedisClientTestsBase - { - [Test] - public void Can_Set_and_Expire_key_in_atomic_transaction() - { - var oneSec = TimeSpan.FromSeconds(1); + [TestFixture] + public class RedisPipelineCommonTests + : RedisClientTestsBase + { + [Test] + public void Can_Set_and_Expire_key_in_atomic_transaction() + { + var oneSec = TimeSpan.FromSeconds(1); - Assert.That(Redis.GetValue("key"), Is.Null); - using (var trans = Redis.CreatePipeline()) //Calls 'MULTI' - { - trans.QueueCommand(r => r.SetEntry("key", "a")); //Queues 'SET key a' - trans.QueueCommand(r => r.ExpireEntryIn("key", oneSec)); //Queues 'EXPIRE key 1' + Assert.That(Redis.GetValue("key"), Is.Null); + using (var trans = Redis.CreatePipeline()) //Calls 'MULTI' + { + trans.QueueCommand(r => r.SetValue("key", "a")); //Queues 'SET key a' + trans.QueueCommand(r => r.ExpireEntryIn("key", oneSec)); //Queues 'EXPIRE key 1' - trans.Flush(); //Calls 'EXEC' + trans.Flush(); //Calls 'EXEC' - } //Calls 'DISCARD' if 'EXEC' wasn't called + } //Calls 'DISCARD' if 'EXEC' wasn't called - Assert.That(Redis.GetValue("key"), Is.EqualTo("a")); - Thread.Sleep(TimeSpan.FromSeconds(2)); - Assert.That(Redis.GetValue("key"), Is.Null); - } + Assert.That(Redis.GetValue("key"), 
Is.EqualTo("a")); + Thread.Sleep(TimeSpan.FromSeconds(2)); + Assert.That(Redis.GetValue("key"), Is.Null); + } [Test] public void Can_SetAll_and_Publish_in_atomic_transaction() @@ -45,31 +45,31 @@ public void Can_SetAll_and_Publish_in_atomic_transaction() } } - [Test] - public void Can_Pop_priority_message_from_SortedSet_and_Add_to_workq_in_atomic_transaction() - { - var messages = new List { "message4", "message3", "message2" }; + [Test] + public void Can_Pop_priority_message_from_SortedSet_and_Add_to_workq_in_atomic_transaction() + { + var messages = new List { "message4", "message3", "message2" }; + + Redis.AddItemToList("workq", "message1"); - Redis.AddItemToList("workq", "message1"); - - var priority = 1; - messages.ForEach(x => Redis.AddItemToSortedSet("prioritymsgs", x, priority++)); + var priority = 1; + messages.ForEach(x => Redis.AddItemToSortedSet("prioritymsgs", x, priority++)); - var highestPriorityMessage = Redis.PopItemWithHighestScoreFromSortedSet("prioritymsgs"); + var highestPriorityMessage = Redis.PopItemWithHighestScoreFromSortedSet("prioritymsgs"); - using (var trans = Redis.CreatePipeline()) - { - trans.QueueCommand(r => r.RemoveItemFromSortedSet("prioritymsgs", highestPriorityMessage)); - trans.QueueCommand(r => r.AddItemToList("workq", highestPriorityMessage)); + using (var trans = Redis.CreatePipeline()) + { + trans.QueueCommand(r => r.RemoveItemFromSortedSet("prioritymsgs", highestPriorityMessage)); + trans.QueueCommand(r => r.AddItemToList("workq", highestPriorityMessage)); - trans.Flush(); - } + trans.Flush(); + } - Assert.That(Redis.GetAllItemsFromList("workq"), - Is.EquivalentTo(new List { "message1", "message2" })); - Assert.That(Redis.GetAllItemsFromSortedSet("prioritymsgs"), - Is.EquivalentTo(new List { "message3", "message4" })); - } + Assert.That(Redis.GetAllItemsFromList("workq"), + Is.EquivalentTo(new List { "message1", "message2" })); + Assert.That(Redis.GetAllItemsFromSortedSet("prioritymsgs"), + Is.EquivalentTo(new List { "message3", "message4" })); + } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPipelineTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisPipelineTests.Async.cs new file mode 100644 index 00000000..6eea8ed1 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPipelineTests.Async.cs @@ -0,0 +1,281 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisPipelineTestsAsync + : RedisClientTestsBaseAsync + { + private const string Key = "pipemultitest"; + private const string ListKey = "pipemultitest-list"; + private const string SetKey = "pipemultitest-set"; + private const string SortedSetKey = "pipemultitest-sortedset"; + + public override void OnAfterEachTest() + { + CleanMask = Key + "*"; + base.OnAfterEachTest(); + } + + [Test] + public async Task Can_call_single_operation_in_pipeline() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + var map = new Dictionary(); + pipeline.QueueCommand(r => r.GetAsync(Key).AsValueTask(), y => map[Key] = y); + + await pipeline.FlushAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + } + + [Test] + public async Task No_commit_of_atomic_pipelines_discards_all_commands() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await 
using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + } + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } + + [Test] + public async Task Exception_in_atomic_pipelines_discards_all_commands() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + try + { + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + throw new NotSupportedException(); + } + catch (NotSupportedException) + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Can_call_single_operation_3_Times_in_pipeline() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + + await pipeline.FlushAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("3")); + } + [Test] + public async Task Can_call_hash_operations_in_pipeline() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + var fields = new[] { "field1", "field2", "field3" }; + var values = new[] { "1", "2", "3" }; + var fieldBytes = new byte[fields.Length][]; + for (int i = 0; i < fields.Length; ++i) + { + fieldBytes[i] = GetBytes(fields[i]); + + } + var valueBytes = new byte[values.Length][]; + for (int i = 0; i < values.Length; ++i) + { + valueBytes[i] = GetBytes(values[i]); + + } + byte[][] members = null; + await using var pipeline = RedisAsync.CreatePipeline(); + + + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).HMSetAsync(Key, fieldBytes, valueBytes)); + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).HGetAllAsync(Key), x => members = x); + + + await pipeline.FlushAsync(); + + + for (var i = 0; i < members.Length; i += 2) + { + Assert.AreEqual(members[i], fieldBytes[i / 2]); + Assert.AreEqual(members[i + 1], valueBytes[i / 2]); + + } + } + + [Test] + public async Task Can_call_multiple_setexs_in_pipeline() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + var keys = new[] { Key + "key1", Key + "key2", Key + "key3" }; + var values = new[] { "1", "2", "3" }; + await using var pipeline = RedisAsync.CreatePipeline(); + + for (int i = 0; i < 3; ++i) + { + int index0 = i; + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync(keys[index0], 100, GetBytes(values[index0]))); + } + + await pipeline.FlushAsync(); + await pipeline.ReplayAsync(); + + + for (int i = 0; i < 3; ++i) + Assert.AreEqual(await RedisAsync.GetValueAsync(keys[i]), values[i]); + } + + [Test] + public async Task Can_call_single_operation_with_callback_3_Times_in_pipeline() + { + var results = new List(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + + await pipeline.FlushAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("3")); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public async Task Supports_different_operation_types_in_same_pipeline() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = 
false;
+
+ Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null);
+ await using (var pipeline = RedisAsync.CreatePipeline())
+ {
+ pipeline.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult));
+ pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem1"));
+ pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem2"));
+ pipeline.QueueCommand(r => r.AddItemToSetAsync(SetKey, "setitem"));
+ pipeline.QueueCommand(r => r.SetContainsItemAsync(SetKey, "setitem"), b => containsItem = b);
+ pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem1"));
+ pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem2"));
+ pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem3"));
+ pipeline.QueueCommand(r => r.GetListCountAsync(ListKey), intResult => collectionCounts.Add(intResult));
+ pipeline.QueueCommand(r => r.GetSetCountAsync(SetKey), intResult => collectionCounts.Add(intResult));
+ pipeline.QueueCommand(r => r.GetSortedSetCountAsync(SortedSetKey), intResult => collectionCounts.Add(intResult));
+ pipeline.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult));
+
+ await pipeline.FlushAsync();
+ }
+
+ Assert.That(containsItem, Is.True);
+ Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("2"));
+ Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 }));
+ Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 }));
+ Assert.That(await RedisAsync.GetAllItemsFromListAsync(ListKey), Is.EquivalentTo(new List { "listitem1", "listitem2" }));
+ Assert.That(await RedisAsync.GetAllItemsFromSetAsync(SetKey), Is.EquivalentTo(new List { "setitem" }));
+ Assert.That(await RedisAsync.GetAllItemsFromSortedSetAsync(SortedSetKey), Is.EquivalentTo(new List { "sortedsetitem1", "sortedsetitem2", "sortedsetitem3" }));
+ }
+
+ [Test]
+ public async Task Can_call_multi_string_operations_in_pipeline()
+ {
+ string item1 = null;
+ string item4 = null;
+
+ var results = new List();
+ Assert.That(await RedisAsync.GetListCountAsync(ListKey), Is.EqualTo(0));
+ await using (var pipeline = RedisAsync.CreatePipeline())
+ {
+ pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem1"));
+ pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem2"));
+ pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem3"));
+ pipeline.QueueCommand(r => r.GetAllItemsFromListAsync(ListKey), x => results = x);
+ pipeline.QueueCommand(r => r.GetItemFromListAsync(ListKey, 0), x => item1 = x);
+ pipeline.QueueCommand(r => r.GetItemFromListAsync(ListKey, 4), x => item4 = x);
+
+ await pipeline.FlushAsync();
+ }
+
+ Assert.That(await RedisAsync.GetListCountAsync(ListKey), Is.EqualTo(3));
+ Assert.That(results, Is.EquivalentTo(new List { "listitem1", "listitem2", "listitem3" }));
+ Assert.That(item1, Is.EqualTo("listitem1"));
+ Assert.That(item4, Is.Null);
+ }
+ [Test]
+ // Operations that are not supported in older versions will look at server info to determine what to do.
+ // If server info is fetched each time, then it will interfer with pipeline + public async Task Can_call_operation_not_supported_on_older_servers_in_pipeline() + { + var temp = new byte[1]; + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync(Key + "key", 5, temp)); + await pipeline.FlushAsync(); + } + [Test] + public async Task Pipeline_can_be_replayed() + { + string KeySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(KeySquared)); + await pipeline.FlushAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + await NativeAsync.DelAsync(Key); + await NativeAsync.DelAsync(KeySquared); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + + await pipeline.ReplayAsync(); + await pipeline.DisposeAsync(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + } + + [Test] + public async Task Pipeline_can_be_contain_watch() + { + string KeySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(KeySquared)); + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).WatchAsync(new[] { Key + "FOO" })); + await pipeline.FlushAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + } + + [Test] + public async Task Can_call_AddRangeToSet_in_pipeline() + { + await using var pipeline = RedisAsync.CreatePipeline(); + var key = "pipeline-test"; + + pipeline.QueueCommand(r => r.RemoveAsync(key).AsValueTask()); + pipeline.QueueCommand(r => r.AddRangeToSetAsync(key, new[] { "A", "B", "C" }.ToList())); + + await pipeline.FlushAsync(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPipelineTests.cs b/tests/ServiceStack.Redis.Tests/RedisPipelineTests.cs index 7fb3abe6..037f013e 100644 --- a/tests/ServiceStack.Redis.Tests/RedisPipelineTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisPipelineTests.cs @@ -1,84 +1,85 @@ using System; using System.Collections.Generic; +using System.Linq; using NUnit.Framework; namespace ServiceStack.Redis.Tests { - [TestFixture] - public class RedisPipelineTests - : RedisClientTestsBase - { - private const string Key = "pipemultitest"; + [TestFixture] + public class RedisPipelineTests + : RedisClientTestsBase + { + private const string Key = "pipemultitest"; private const string ListKey = "pipemultitest-list"; private const string SetKey = "pipemultitest-set"; private const string SortedSetKey = "pipemultitest-sortedset"; - public override void TearDown() + public override void OnAfterEachTest() { CleanMask = Key + "*"; - base.TearDown(); + base.OnAfterEachTest(); } - [Test] - public void Can_call_single_operation_in_pipeline() - { - Assert.That(Redis.GetValue(Key), Is.Null); - 
using (var pipeline = Redis.CreatePipeline()) - { - pipeline.QueueCommand(r => r.IncrementValue(Key)); - var map = new Dictionary(); - pipeline.QueueCommand(r => r.Get(Key), y => map[Key] = y); - - pipeline.Flush(); - } - - Assert.That(Redis.GetValue(Key), Is.EqualTo("1")); - } - - [Test] - public void No_commit_of_atomic_pipelines_discards_all_commands() - { - Assert.That(Redis.GetValue(Key), Is.Null); - using (var pipeline = Redis.CreatePipeline()) - { - pipeline.QueueCommand(r => r.IncrementValue(Key)); - } - Assert.That(Redis.GetValue(Key), Is.Null); - } - - [Test] - public void Exception_in_atomic_pipelines_discards_all_commands() - { - Assert.That(Redis.GetValue(Key), Is.Null); - try - { - using (var pipeline = Redis.CreatePipeline()) - { - pipeline.QueueCommand(r => r.IncrementValue(Key)); - throw new NotSupportedException(); - } - } - catch (NotSupportedException ignore) - { - Assert.That(Redis.GetValue(Key), Is.Null); - } - } - - [Test] - public void Can_call_single_operation_3_Times_in_pipeline() - { - Assert.That(Redis.GetValue(Key), Is.Null); - using (var pipeline = Redis.CreatePipeline()) - { - pipeline.QueueCommand(r => r.IncrementValue(Key)); - pipeline.QueueCommand(r => r.IncrementValue(Key)); - pipeline.QueueCommand(r => r.IncrementValue(Key)); - - pipeline.Flush(); - } - - Assert.That(Redis.GetValue(Key), Is.EqualTo("3")); - } + [Test] + public void Can_call_single_operation_in_pipeline() + { + Assert.That(Redis.GetValue(Key), Is.Null); + using (var pipeline = Redis.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValue(Key)); + var map = new Dictionary(); + pipeline.QueueCommand(r => r.Get(Key), y => map[Key] = y); + + pipeline.Flush(); + } + + Assert.That(Redis.GetValue(Key), Is.EqualTo("1")); + } + + [Test] + public void No_commit_of_atomic_pipelines_discards_all_commands() + { + Assert.That(Redis.GetValue(Key), Is.Null); + using (var pipeline = Redis.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValue(Key)); + } + Assert.That(Redis.GetValue(Key), Is.Null); + } + + [Test] + public void Exception_in_atomic_pipelines_discards_all_commands() + { + Assert.That(Redis.GetValue(Key), Is.Null); + try + { + using (var pipeline = Redis.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValue(Key)); + throw new NotSupportedException(); + } + } + catch (NotSupportedException) + { + Assert.That(Redis.GetValue(Key), Is.Null); + } + } + + [Test] + public void Can_call_single_operation_3_Times_in_pipeline() + { + Assert.That(Redis.GetValue(Key), Is.Null); + using (var pipeline = Redis.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValue(Key)); + pipeline.QueueCommand(r => r.IncrementValue(Key)); + pipeline.QueueCommand(r => r.IncrementValue(Key)); + + pipeline.Flush(); + } + + Assert.That(Redis.GetValue(Key), Is.EqualTo("3")); + } [Test] public void Can_call_hash_operations_in_pipeline() { @@ -99,11 +100,11 @@ public void Can_call_hash_operations_in_pipeline() } byte[][] members = null; var pipeline = Redis.CreatePipeline(); - - + + pipeline.QueueCommand(r => ((RedisNativeClient)r).HMSet(Key, fieldBytes, valueBytes)); pipeline.QueueCommand(r => ((RedisNativeClient)r).HGetAll(Key), x => members = x); - + pipeline.Flush(); @@ -121,11 +122,11 @@ public void Can_call_hash_operations_in_pipeline() public void Can_call_multiple_setexs_in_pipeline() { Assert.That(Redis.GetValue(Key), Is.Null); - var keys = new[] {Key + "key1", Key + "key2", Key + "key3"}; - var values = new[] { "1","2","3" }; + var keys = new[] { Key + "key1", Key + 
"key2", Key + "key3" }; + var values = new[] { "1", "2", "3" }; var pipeline = Redis.CreatePipeline(); - - for (int i = 0; i < 3; ++i ) + + for (int i = 0; i < 3; ++i) { int index0 = i; pipeline.QueueCommand(r => ((RedisNativeClient)r).SetEx(keys[index0], 100, GetBytes(values[index0]))); @@ -133,92 +134,92 @@ public void Can_call_multiple_setexs_in_pipeline() pipeline.Flush(); pipeline.Replay(); - - - for (int i = 0; i < 3; ++i ) + + + for (int i = 0; i < 3; ++i) Assert.AreEqual(Redis.GetValue(keys[i]), values[i]); pipeline.Dispose(); } - [Test] - public void Can_call_single_operation_with_callback_3_Times_in_pipeline() - { - var results = new List(); - Assert.That(Redis.GetValue(Key), Is.Null); - using (var pipeline = Redis.CreatePipeline()) - { - pipeline.QueueCommand(r => r.IncrementValue(Key), results.Add); - pipeline.QueueCommand(r => r.IncrementValue(Key), results.Add); - pipeline.QueueCommand(r => r.IncrementValue(Key), results.Add); - - pipeline.Flush(); - } - - Assert.That(Redis.GetValue(Key), Is.EqualTo("3")); - Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); - } - - [Test] - public void Supports_different_operation_types_in_same_pipeline() - { - var incrementResults = new List(); - var collectionCounts = new List(); - var containsItem = false; - - Assert.That(Redis.GetValue(Key), Is.Null); - using (var pipeline = Redis.CreatePipeline()) - { - pipeline.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); - pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem1")); - pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem2")); - pipeline.QueueCommand(r => r.AddItemToSet(SetKey, "setitem")); - pipeline.QueueCommand(r => r.SetContainsItem(SetKey, "setitem"), b => containsItem = b); - pipeline.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem1")); - pipeline.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem2")); - pipeline.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem3")); - pipeline.QueueCommand(r => r.GetListCount(ListKey), intResult => collectionCounts.Add(intResult)); - pipeline.QueueCommand(r => r.GetSetCount(SetKey), intResult => collectionCounts.Add(intResult)); - pipeline.QueueCommand(r => r.GetSortedSetCount(SortedSetKey), intResult => collectionCounts.Add(intResult)); - pipeline.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); - - pipeline.Flush(); - } - - Assert.That(containsItem, Is.True); - Assert.That(Redis.GetValue(Key), Is.EqualTo("2")); - Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); - Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); - Assert.That(Redis.GetAllItemsFromList(ListKey), Is.EquivalentTo(new List { "listitem1", "listitem2" })); - Assert.That(Redis.GetAllItemsFromSet(SetKey), Is.EquivalentTo(new List { "setitem" })); - Assert.That(Redis.GetAllItemsFromSortedSet(SortedSetKey), Is.EquivalentTo(new List { "sortedsetitem1", "sortedsetitem2", "sortedsetitem3" })); - } - - [Test] - public void Can_call_multi_string_operations_in_pipeline() - { - string item1 = null; - string item4 = null; - - var results = new List(); - Assert.That(Redis.GetListCount(ListKey), Is.EqualTo(0)); - using (var pipeline = Redis.CreatePipeline()) - { - pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem1")); - pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem2")); - pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem3")); - pipeline.QueueCommand(r => 
r.GetAllItemsFromList(ListKey), x => results = x); - pipeline.QueueCommand(r => r.GetItemFromList(ListKey, 0), x => item1 = x); - pipeline.QueueCommand(r => r.GetItemFromList(ListKey, 4), x => item4 = x); - - pipeline.Flush(); - } - - Assert.That(Redis.GetListCount(ListKey), Is.EqualTo(3)); - Assert.That(results, Is.EquivalentTo(new List { "listitem1", "listitem2", "listitem3" })); - Assert.That(item1, Is.EqualTo("listitem1")); - Assert.That(item4, Is.Null); - } + [Test] + public void Can_call_single_operation_with_callback_3_Times_in_pipeline() + { + var results = new List(); + Assert.That(Redis.GetValue(Key), Is.Null); + using (var pipeline = Redis.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValue(Key), results.Add); + pipeline.QueueCommand(r => r.IncrementValue(Key), results.Add); + pipeline.QueueCommand(r => r.IncrementValue(Key), results.Add); + + pipeline.Flush(); + } + + Assert.That(Redis.GetValue(Key), Is.EqualTo("3")); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public void Supports_different_operation_types_in_same_pipeline() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + Assert.That(Redis.GetValue(Key), Is.Null); + using (var pipeline = Redis.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); + pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem1")); + pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem2")); + pipeline.QueueCommand(r => r.AddItemToSet(SetKey, "setitem")); + pipeline.QueueCommand(r => r.SetContainsItem(SetKey, "setitem"), b => containsItem = b); + pipeline.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem1")); + pipeline.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem2")); + pipeline.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem3")); + pipeline.QueueCommand(r => r.GetListCount(ListKey), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSetCount(SetKey), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSortedSetCount(SortedSetKey), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); + + pipeline.Flush(); + } + + Assert.That(containsItem, Is.True); + Assert.That(Redis.GetValue(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + Assert.That(Redis.GetAllItemsFromList(ListKey), Is.EquivalentTo(new List { "listitem1", "listitem2" })); + Assert.That(Redis.GetAllItemsFromSet(SetKey), Is.EquivalentTo(new List { "setitem" })); + Assert.That(Redis.GetAllItemsFromSortedSet(SortedSetKey), Is.EquivalentTo(new List { "sortedsetitem1", "sortedsetitem2", "sortedsetitem3" })); + } + + [Test] + public void Can_call_multi_string_operations_in_pipeline() + { + string item1 = null; + string item4 = null; + + var results = new List(); + Assert.That(Redis.GetListCount(ListKey), Is.EqualTo(0)); + using (var pipeline = Redis.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem1")); + pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem2")); + pipeline.QueueCommand(r => r.AddItemToList(ListKey, "listitem3")); + pipeline.QueueCommand(r => r.GetAllItemsFromList(ListKey), x => results = x); + pipeline.QueueCommand(r => 
r.GetItemFromList(ListKey, 0), x => item1 = x); + pipeline.QueueCommand(r => r.GetItemFromList(ListKey, 4), x => item4 = x); + + pipeline.Flush(); + } + + Assert.That(Redis.GetListCount(ListKey), Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { "listitem1", "listitem2", "listitem3" })); + Assert.That(item1, Is.EqualTo("listitem1")); + Assert.That(item4, Is.Null); + } [Test] // Operations that are not supported in older versions will look at server info to determine what to do. // If server info is fetched each time, then it will interfer with pipeline @@ -227,7 +228,7 @@ public void Can_call_operation_not_supported_on_older_servers_in_pipeline() var temp = new byte[1]; using (var pipeline = Redis.CreatePipeline()) { - pipeline.QueueCommand(r => ((RedisNativeClient)r).SetEx(Key + "key",5,temp)); + pipeline.QueueCommand(r => ((RedisNativeClient)r).SetEx(Key + "key", 5, temp)); pipeline.Flush(); } } @@ -255,8 +256,8 @@ public void Pipeline_can_be_replayed() Assert.That(Redis.GetValue(Key), Is.EqualTo("1")); Assert.That(Redis.GetValue(KeySquared), Is.EqualTo("1")); } - } + [Test] public void Pipeline_can_be_contain_watch() { @@ -273,8 +274,20 @@ public void Pipeline_can_be_contain_watch() Assert.That(Redis.GetValue(Key), Is.EqualTo("1")); Assert.That(Redis.GetValue(KeySquared), Is.EqualTo("1")); } - } - } + [Test] + public void Can_call_AddRangeToSet_in_pipeline() + { + using (var pipeline = Redis.CreatePipeline()) + { + var key = "pipeline-test"; + + pipeline.QueueCommand(r => r.Remove(key)); + pipeline.QueueCommand(r => r.AddRangeToSet(key, new[] { "A", "B", "C" }.ToList())); + + pipeline.Flush(); + } + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPubSubServerTests.cs b/tests/ServiceStack.Redis.Tests/RedisPubSubServerTests.cs new file mode 100644 index 00000000..4984228c --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPubSubServerTests.cs @@ -0,0 +1,149 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [Ignore("Ignore long running tests")] + [TestFixture] + public class RedisPubSubServerTests + { + RedisManagerPool clientsManager = new RedisManagerPool(TestConfig.MasterHosts); + + [OneTimeTearDown] + public void OneTimeTearDown() + { + clientsManager.Dispose(); + } + + private RedisPubSubServer CreatePubSubServer( + int intervalSecs = 1, int timeoutSecs = 3, params string[] channels) + { + using (var redis = clientsManager.GetClient()) + redis.FlushAll(); + + if (channels.Length == 0) + channels = new[] {"topic:test"}; + + var pubSub = new RedisPubSubServer( + clientsManager, + channels) + { + HeartbeatInterval = TimeSpan.FromSeconds(intervalSecs), + HeartbeatTimeout = TimeSpan.FromSeconds(timeoutSecs) + }; + + return pubSub; + } + + [Test] + public void Does_send_heartbeat_pulses() + { + int pulseCount = 0; + using (var pubSub = CreatePubSubServer(intervalSecs: 1, timeoutSecs: 3)) + { + pubSub.OnHeartbeatReceived = () => "pulse #{0}".Print(++pulseCount); + pubSub.Start(); + + Thread.Sleep(3100); + + Assert.That(pulseCount, Is.GreaterThan(2)); + } + } + + [Test] + public void Does_restart_when_Heartbeat_Timeout_exceeded() + { + //This auto restarts 2 times before letting connection to stay alive + + int pulseCount = 0; + int startCount = 0; + int stopCount = 0; + + using (var pubSub = CreatePubSubServer(intervalSecs: 1, timeoutSecs: 3)) + { + pubSub.OnStart = () => "start #{0}".Print(++startCount); + 
pubSub.OnStop = () => "stop #{0}".Print(++stopCount); + pubSub.OnHeartbeatReceived = () => "pulse #{0}".Print(++pulseCount); + + //pause longer than heartbeat timeout so autoreconnects + pubSub.OnControlCommand = op => + { + if (op == "PULSE" && stopCount < 2) + Thread.Sleep(4000); + }; + + pubSub.Start(); + + Thread.Sleep(30 * 1000); + + Assert.That(pulseCount, Is.GreaterThan(3)); + Assert.That(startCount, Is.EqualTo(3)); + Assert.That(stopCount, Is.EqualTo(2)); + } + } + + [Test] + public void Does_send_heartbeat_pulses_to_multiple_PubSubServers() + { + var count = 15; + + int pulseCount = 0; + var pubSubs = count.Times(i => + { + var pubSub = CreatePubSubServer(intervalSecs: 20, timeoutSecs: 30); + pubSub.OnHeartbeatReceived = () => "{0}: pulse #{1}".Print(i, ++pulseCount); + pubSub.Start(); + return pubSub; + }); + + Thread.Sleep(32000); + + "pulseCount = {0}".Print(pulseCount); + + Assert.That(pulseCount, Is.GreaterThan(2 * count)); + Assert.That(pulseCount, Is.LessThan(8 * count)); + + pubSubs.Each(x => x.Dispose()); + } + + [Test] + public void Can_restart_and_subscribe_to_more_channels() + { + var a = new List(); + var b = new List(); + var pubSub = CreatePubSubServer(intervalSecs: 20, timeoutSecs: 30, "topic:a"); + pubSub.OnMessage = (channel, msg) => { + if (channel == "topic:a") + a.Add(msg); + else if (channel == "topic:b") + b.Add(msg); + }; + pubSub.Start(); + Thread.Sleep(100); + + var client = clientsManager.GetClient(); + var i = 0; + client.PublishMessage("topic:a", $"msg: ${++i}"); + client.PublishMessage("topic:b", $"msg: ${++i}"); + + Thread.Sleep(100); + Assert.That(a.Count, Is.EqualTo(1)); + Assert.That(b.Count, Is.EqualTo(0)); + + pubSub.Channels = new[] {"topic:a", "topic:b"}; + pubSub.Restart(); + Thread.Sleep(100); + + client.PublishMessage("topic:a", $"msg: ${++i}"); + client.PublishMessage("topic:b", $"msg: ${++i}"); + + + Thread.Sleep(100); + Assert.That(a.Count, Is.EqualTo(2)); + Assert.That(b.Count, Is.EqualTo(1)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPubSubTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisPubSubTests.Async.cs new file mode 100644 index 00000000..b8865dc6 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPubSubTests.Async.cs @@ -0,0 +1,293 @@ +using NUnit.Framework; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisPubSubTestsAsync + : RedisClientTestsBaseAsync + { + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = "RedisPubSubTests"; + } + + [Test] + public async Task Can_Subscribe_and_Publish_single_message() + { + var channelName = PrefixedKey("CHANNEL1"); + const string message = "Hello, World!"; + var key = PrefixedKey("Can_Subscribe_and_Publish_single_message"); + + await RedisAsync.IncrementValueAsync(key); + + await using (var subscription = await RedisAsync.CreateSubscriptionAsync()) + { + subscription.OnSubscribeAsync += channel => + { + Log("Subscribed to '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + return default; + }; + subscription.OnUnSubscribeAsync += channel => + { + Log("UnSubscribed from '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + return default; + }; + subscription.OnMessageAsync += async (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + 
Assert.That(channel, Is.EqualTo(channelName)); + Assert.That(msg, Is.EqualTo(message)); + await subscription.UnSubscribeFromAllChannelsAsync(); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + Log("Publishing '{0}' to '{1}'", message, channelName); + await redisClient.PublishMessageAsync(channelName, message); + }); + + Log("Start Listening On " + channelName); + await subscription.SubscribeToChannelsAsync(new[] { channelName }); //blocking + } + + Log("Using as normal client again..."); + await RedisAsync.IncrementValueAsync(key); + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo(2)); + } + + [Test] + public async Task Can_Subscribe_and_Publish_single_message_using_wildcard() + { + var channelWildcard = PrefixedKey("CHANNEL.*"); + var channelName = PrefixedKey("CHANNEL.1"); + const string message = "Hello, World!"; + var key = PrefixedKey("Can_Subscribe_and_Publish_single_message"); + + await RedisAsync.IncrementValueAsync(key); + + await using (var subscription = await RedisAsync.CreateSubscriptionAsync()) + { + subscription.OnSubscribeAsync += channel => + { + Log("Subscribed to '{0}'", channelWildcard); + Assert.That(channel, Is.EqualTo(channelWildcard)); + return default; + }; + subscription.OnUnSubscribeAsync += channel => + { + Log("UnSubscribed from '{0}'", channelWildcard); + Assert.That(channel, Is.EqualTo(channelWildcard)); + return default; + }; + subscription.OnMessageAsync += async (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelName)); + Assert.That(msg, Is.EqualTo(message), "we should get the message, not the channel"); + await subscription.UnSubscribeFromChannelsMatchingAsync(new string[0]); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + Log("Publishing '{0}' to '{1}'", message, channelName); + await redisClient.PublishMessageAsync(channelName, message); + }); + + Log("Start Listening On " + channelName); + await subscription.SubscribeToChannelsMatchingAsync(new[] { channelWildcard }); //blocking + } + + Log("Using as normal client again..."); + await RedisAsync.IncrementValueAsync(key); + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo(2)); + } + + [Test] + public async Task Can_Subscribe_and_Publish_multiple_message() + { + var channelName = PrefixedKey("CHANNEL2"); + const string messagePrefix = "MESSAGE "; + string key = PrefixedKey("Can_Subscribe_and_Publish_multiple_message"); + const int publishMessageCount = 5; + var messagesReceived = 0; + + await RedisAsync.IncrementValueAsync(key); + + await using (var subscription = await RedisAsync.CreateSubscriptionAsync()) + { + subscription.OnSubscribeAsync += channel => + { + Log("Subscribed to '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + return default; + }; + subscription.OnUnSubscribeAsync += channel => + { + Log("UnSubscribed from '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + return default; + }; + subscription.OnMessageAsync += async (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelName)); + Assert.That(msg, Is.EqualTo(messagePrefix + messagesReceived++)); + + if (messagesReceived == publishMessageCount) + { + await 
subscription.UnSubscribeFromAllChannelsAsync(); + } + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + for (var i = 0; i < publishMessageCount; i++) + { + var message = messagePrefix + i; + Log("Publishing '{0}' to '{1}'", message, channelName); + await redisClient.PublishMessageAsync(channelName, message); + } + }); + + Log("Start Listening On"); + await subscription.SubscribeToChannelsAsync(new[] { channelName }); //blocking + } + + Log("Using as normal client again..."); + await RedisAsync.IncrementValueAsync(key); + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo(2)); + + Assert.That(messagesReceived, Is.EqualTo(publishMessageCount)); + } + + [Test] + public async Task Can_Subscribe_and_Publish_message_to_multiple_channels() + { + var channelPrefix = PrefixedKey("CHANNEL3 "); + const string message = "MESSAGE"; + const int publishChannelCount = 5; + var key = PrefixedKey("Can_Subscribe_and_Publish_message_to_multiple_channels"); + + var channels = new List(); + publishChannelCount.Times(i => channels.Add(channelPrefix + i)); + + var messagesReceived = 0; + var channelsSubscribed = 0; + var channelsUnSubscribed = 0; + + await RedisAsync.IncrementValueAsync(key); + + await using (var subscription = await RedisAsync.CreateSubscriptionAsync()) + { + subscription.OnSubscribeAsync += channel => + { + Log("Subscribed to '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelPrefix + channelsSubscribed++)); + return default; + }; + subscription.OnUnSubscribeAsync += channel => + { + Log("UnSubscribed from '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelPrefix + channelsUnSubscribed++)); + return default; + }; + subscription.OnMessageAsync += async (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelPrefix + messagesReceived++)); + Assert.That(msg, Is.EqualTo(message)); + + await subscription.UnSubscribeFromChannelsAsync(new[] { channel }); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + foreach (var channel in channels) + { + Log("Publishing '{0}' to '{1}'", message, channel); + await redisClient.PublishMessageAsync(channel, message); + } + }); + + Log("Start Listening On"); + await subscription.SubscribeToChannelsAsync(channels.ToArray()); //blocking + } + + Log("Using as normal client again..."); + await RedisAsync.IncrementValueAsync(key); + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo(2)); + + Assert.That(messagesReceived, Is.EqualTo(publishChannelCount)); + Assert.That(channelsSubscribed, Is.EqualTo(publishChannelCount)); + Assert.That(channelsUnSubscribed, Is.EqualTo(publishChannelCount)); + } + + [Test] + public async Task Can_Subscribe_to_channel_pattern() + { + int msgs = 0; + await using var subscription = await RedisAsync.CreateSubscriptionAsync(); + subscription.OnMessageAsync += async (channel, msg) => + { + Debug.WriteLine(String.Format("{0}: {1}", channel, msg + msgs++)); + await subscription.UnSubscribeFromChannelsMatchingAsync(new[] { PrefixedKey("CHANNEL4:TITLE*") }); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + Log("Publishing msg..."); + await 
redisClient.PublishMessageAsync(PrefixedKey("CHANNEL4:TITLE1"), "hello"); // .ToUtf8Bytes() + }); + + Log("Start Listening On"); + await subscription.SubscribeToChannelsMatchingAsync(new[] { PrefixedKey("CHANNEL4:TITLE*") }); + } + + [Test] + public async Task Can_Subscribe_to_multiplechannel_pattern() + { + var channels = new[] { PrefixedKey("CHANNEL5:TITLE*"), PrefixedKey("CHANNEL5:BODY*") }; + int msgs = 0; + await using var subscription = await RedisAsync.CreateSubscriptionAsync(); + subscription.OnMessageAsync += async (channel, msg) => + { + Debug.WriteLine(String.Format("{0}: {1}", channel, msg + msgs++)); + await subscription.UnSubscribeFromChannelsMatchingAsync(channels); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + Log("Publishing msg..."); + await redisClient.PublishMessageAsync(PrefixedKey("CHANNEL5:BODY"), "hello"); // .ToUtf8Bytes() + }); + + Log("Start Listening On"); + await subscription.SubscribeToChannelsMatchingAsync(channels); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPubSubTests.cs b/tests/ServiceStack.Redis.Tests/RedisPubSubTests.cs index 05c7cf88..62ff5f4b 100644 --- a/tests/ServiceStack.Redis.Tests/RedisPubSubTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisPubSubTests.cs @@ -3,68 +3,68 @@ using System.Diagnostics; using System.Threading; using NUnit.Framework; -using ServiceStack.Common.Extensions; +using ServiceStack.Common; using ServiceStack.Text; namespace ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class RedisPubSubTests - : RedisClientTestsBase - { + [TestFixture, Category("Integration")] + public class RedisPubSubTests + : RedisClientTestsBase + { public override void OnBeforeEachTest() { base.OnBeforeEachTest(); Redis.NamespacePrefix = "RedisPubSubTests"; } - [Test] - public void Can_Subscribe_and_Publish_single_message() - { - var channelName = PrefixedKey("CHANNEL1"); - const string message = "Hello, World!"; - var key = PrefixedKey("Can_Subscribe_and_Publish_single_message"); + [Test] + public void Can_Subscribe_and_Publish_single_message() + { + var channelName = PrefixedKey("CHANNEL1"); + const string message = "Hello, World!"; + var key = PrefixedKey("Can_Subscribe_and_Publish_single_message"); Redis.IncrementValue(key); - using (var subscription = Redis.CreateSubscription()) - { - subscription.OnSubscribe = channel => - { - Log("Subscribed to '{0}'", channel); - Assert.That(channel, Is.EqualTo(channelName)); - }; - subscription.OnUnSubscribe = channel => - { - Log("UnSubscribed from '{0}'", channel); - Assert.That(channel, Is.EqualTo(channelName)); - }; - subscription.OnMessage = (channel, msg) => - { - Log("Received '{0}' from channel '{1}'", msg, channel); - Assert.That(channel, Is.EqualTo(channelName)); - Assert.That(msg, Is.EqualTo(message)); - subscription.UnSubscribeFromAllChannels(); - }; - - ThreadPool.QueueUserWorkItem(x => - { + using (var subscription = Redis.CreateSubscription()) + { + subscription.OnSubscribe = channel => + { + Log("Subscribed to '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + }; + subscription.OnUnSubscribe = channel => + { + Log("UnSubscribed from '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + }; + subscription.OnMessage = (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelName)); + 
Assert.That(msg, Is.EqualTo(message)); + subscription.UnSubscribeFromAllChannels(); + }; + + ThreadPool.QueueUserWorkItem(x => + { Thread.Sleep(100); // to be sure that we have subscribers - using (var redisClient = CreateRedisClient()) - { - Log("Publishing '{0}' to '{1}'", message, channelName); - redisClient.PublishMessage(channelName, message); - } - }); - - Log("Start Listening On " + channelName); - subscription.SubscribeToChannels(channelName); //blocking - } - - Log("Using as normal client again..."); + using (var redisClient = CreateRedisClient()) + { + Log("Publishing '{0}' to '{1}'", message, channelName); + redisClient.PublishMessage(channelName, message); + } + }); + + Log("Start Listening On " + channelName); + subscription.SubscribeToChannels(channelName); //blocking + } + + Log("Using as normal client again..."); Redis.IncrementValue(key); Assert.That(Redis.Get(key), Is.EqualTo(2)); - } + } [Test] public void Can_Subscribe_and_Publish_single_message_using_wildcard() @@ -91,9 +91,9 @@ public void Can_Subscribe_and_Publish_single_message_using_wildcard() subscription.OnMessage = (channel, msg) => { Log("Received '{0}' from channel '{1}'", msg, channel); - Assert.That(channel, Is.EqualTo(channelWildcard)); + Assert.That(channel, Is.EqualTo(channelName)); Assert.That(msg, Is.EqualTo(message), "we should get the message, not the channel"); - subscription.UnSubscribeFromAllChannels(); + subscription.UnSubscribeFromChannelsMatching(); }; ThreadPool.QueueUserWorkItem(x => @@ -116,162 +116,163 @@ public void Can_Subscribe_and_Publish_single_message_using_wildcard() } [Test] - public void Can_Subscribe_and_Publish_multiple_message() - { - var channelName = PrefixedKey("CHANNEL2"); - const string messagePrefix = "MESSAGE "; + public void Can_Subscribe_and_Publish_multiple_message() + { + var channelName = PrefixedKey("CHANNEL2"); + const string messagePrefix = "MESSAGE "; string key = PrefixedKey("Can_Subscribe_and_Publish_multiple_message"); - const int publishMessageCount = 5; - var messagesReceived = 0; + const int publishMessageCount = 5; + var messagesReceived = 0; Redis.IncrementValue(key); - using (var subscription = Redis.CreateSubscription()) - { - subscription.OnSubscribe = channel => - { - Log("Subscribed to '{0}'", channel); - Assert.That(channel, Is.EqualTo(channelName)); - }; - subscription.OnUnSubscribe = channel => - { - Log("UnSubscribed from '{0}'", channel); - Assert.That(channel, Is.EqualTo(channelName)); - }; - subscription.OnMessage = (channel, msg) => - { - Log("Received '{0}' from channel '{1}'", msg, channel); - Assert.That(channel, Is.EqualTo(channelName)); - Assert.That(msg, Is.EqualTo(messagePrefix + messagesReceived++)); - - if (messagesReceived == publishMessageCount) - { - subscription.UnSubscribeFromAllChannels(); - } - }; - - ThreadPool.QueueUserWorkItem(x => - { + using (var subscription = Redis.CreateSubscription()) + { + subscription.OnSubscribe = channel => + { + Log("Subscribed to '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + }; + subscription.OnUnSubscribe = channel => + { + Log("UnSubscribed from '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + }; + subscription.OnMessage = (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelName)); + Assert.That(msg, Is.EqualTo(messagePrefix + messagesReceived++)); + + if (messagesReceived == publishMessageCount) + { + subscription.UnSubscribeFromAllChannels(); + } + }; + + 
ThreadPool.QueueUserWorkItem(x => + { Thread.Sleep(100); // to be sure that we have subscribers - using (var redisClient = CreateRedisClient()) - { - for (var i = 0; i < publishMessageCount; i++) - { - var message = messagePrefix + i; - Log("Publishing '{0}' to '{1}'", message, channelName); - redisClient.PublishMessage(channelName, message); - } - } - }); - - Log("Start Listening On"); - subscription.SubscribeToChannels(channelName); //blocking - } - - Log("Using as normal client again..."); + using (var redisClient = CreateRedisClient()) + { + for (var i = 0; i < publishMessageCount; i++) + { + var message = messagePrefix + i; + Log("Publishing '{0}' to '{1}'", message, channelName); + redisClient.PublishMessage(channelName, message); + } + } + }); + + Log("Start Listening On"); + subscription.SubscribeToChannels(channelName); //blocking + } + + Log("Using as normal client again..."); Redis.IncrementValue(key); Assert.That(Redis.Get(key), Is.EqualTo(2)); - Assert.That(messagesReceived, Is.EqualTo(publishMessageCount)); - } + Assert.That(messagesReceived, Is.EqualTo(publishMessageCount)); + } - [Test] - public void Can_Subscribe_and_Publish_message_to_multiple_channels() - { - var channelPrefix = PrefixedKey("CHANNEL3 "); - const string message = "MESSAGE"; - const int publishChannelCount = 5; + [Test] + public void Can_Subscribe_and_Publish_message_to_multiple_channels() + { + var channelPrefix = PrefixedKey("CHANNEL3 "); + const string message = "MESSAGE"; + const int publishChannelCount = 5; var key = PrefixedKey("Can_Subscribe_and_Publish_message_to_multiple_channels"); - var channels = new List(); - publishChannelCount.Times(i => channels.Add(channelPrefix + i)); - - var messagesReceived = 0; - var channelsSubscribed = 0; - var channelsUnSubscribed = 0; - - Redis.IncrementValue(key); - - using (var subscription = Redis.CreateSubscription()) - { - subscription.OnSubscribe = channel => - { - Log("Subscribed to '{0}'", channel); - Assert.That(channel, Is.EqualTo(channelPrefix + channelsSubscribed++)); - }; - subscription.OnUnSubscribe = channel => - { - Log("UnSubscribed from '{0}'", channel); - Assert.That(channel, Is.EqualTo(channelPrefix + channelsUnSubscribed++)); - }; - subscription.OnMessage = (channel, msg) => - { - Log("Received '{0}' from channel '{1}'", msg, channel); - Assert.That(channel, Is.EqualTo(channelPrefix + messagesReceived++)); - Assert.That(msg, Is.EqualTo(message)); - - subscription.UnSubscribeFromChannels(channel); - }; - - ThreadPool.QueueUserWorkItem(x => - { + var channels = new List(); + publishChannelCount.Times(i => channels.Add(channelPrefix + i)); + + var messagesReceived = 0; + var channelsSubscribed = 0; + var channelsUnSubscribed = 0; + + Redis.IncrementValue(key); + + using (var subscription = Redis.CreateSubscription()) + { + subscription.OnSubscribe = channel => + { + Log("Subscribed to '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelPrefix + channelsSubscribed++)); + }; + subscription.OnUnSubscribe = channel => + { + Log("UnSubscribed from '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelPrefix + channelsUnSubscribed++)); + }; + subscription.OnMessage = (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelPrefix + messagesReceived++)); + Assert.That(msg, Is.EqualTo(message)); + + subscription.UnSubscribeFromChannels(channel); + }; + + ThreadPool.QueueUserWorkItem(x => + { Thread.Sleep(100); // to be sure that we have subscribers - using (var redisClient = 
CreateRedisClient()) - { - foreach (var channel in channels) - { - Log("Publishing '{0}' to '{1}'", message, channel); - redisClient.PublishMessage(channel, message); - } - } - }); - - Log("Start Listening On"); - subscription.SubscribeToChannels(channels.ToArray()); //blocking - } - - Log("Using as normal client again..."); + using (var redisClient = CreateRedisClient()) + { + foreach (var channel in channels) + { + Log("Publishing '{0}' to '{1}'", message, channel); + redisClient.PublishMessage(channel, message); + } + } + }); + + Log("Start Listening On"); + subscription.SubscribeToChannels(channels.ToArray()); //blocking + } + + Log("Using as normal client again..."); Redis.IncrementValue(key); Assert.That(Redis.Get(key), Is.EqualTo(2)); - Assert.That(messagesReceived, Is.EqualTo(publishChannelCount)); - Assert.That(channelsSubscribed, Is.EqualTo(publishChannelCount)); - Assert.That(channelsUnSubscribed, Is.EqualTo(publishChannelCount)); - } - - [Test] - public void Can_Subscribe_to_channel_pattern() - { - int msgs = 0; - using (var subscription = Redis.CreateSubscription()) - { - subscription.OnMessage = (channel, msg) => { - Debug.WriteLine(String.Format("{0}: {1}", channel, msg + msgs++)); - subscription.UnSubscribeFromChannelsMatching(PrefixedKey("CHANNEL4:TITLE*")); - }; - - ThreadPool.QueueUserWorkItem(x => - { + Assert.That(messagesReceived, Is.EqualTo(publishChannelCount)); + Assert.That(channelsSubscribed, Is.EqualTo(publishChannelCount)); + Assert.That(channelsUnSubscribed, Is.EqualTo(publishChannelCount)); + } + + [Test] + public void Can_Subscribe_to_channel_pattern() + { + int msgs = 0; + using (var subscription = Redis.CreateSubscription()) + { + subscription.OnMessage = (channel, msg) => + { + Debug.WriteLine(String.Format("{0}: {1}", channel, msg + msgs++)); + subscription.UnSubscribeFromChannelsMatching(PrefixedKey("CHANNEL4:TITLE*")); + }; + + ThreadPool.QueueUserWorkItem(x => + { Thread.Sleep(100); // to be sure that we have subscribers - using (var redisClient = CreateRedisClient()) - { - Log("Publishing msg..."); - redisClient.Publish(PrefixedKey("CHANNEL4:TITLE1"), "hello".ToUtf8Bytes()); - } - }); + using (var redisClient = CreateRedisClient()) + { + Log("Publishing msg..."); + redisClient.Publish(PrefixedKey("CHANNEL4:TITLE1"), "hello".ToUtf8Bytes()); + } + }); - Log("Start Listening On"); - subscription.SubscribeToChannelsMatching(PrefixedKey("CHANNEL4:TITLE*")); - } - } + Log("Start Listening On"); + subscription.SubscribeToChannelsMatching(PrefixedKey("CHANNEL4:TITLE*")); + } + } [Test] public void Can_Subscribe_to_multiplechannel_pattern() { - var channels = new[] {PrefixedKey("CHANNEL5:TITLE*"), PrefixedKey("CHANNEL5:BODY*")}; + var channels = new[] { PrefixedKey("CHANNEL5:TITLE*"), PrefixedKey("CHANNEL5:BODY*") }; int msgs = 0; using (var subscription = Redis.CreateSubscription()) { @@ -297,5 +298,5 @@ public void Can_Subscribe_to_multiplechannel_pattern() } } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisScanTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisScanTests.Async.cs new file mode 100644 index 00000000..b5a79762 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisScanTests.Async.cs @@ -0,0 +1,173 @@ +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisScanTestsAsync + : RedisClientTestsBaseAsync + { + [Test] + public async Task 
Can_scan_10_collection() + { + await RedisAsync.FlushAllAsync(); + var keys = 10.Times(x => "KEY" + x); + await RedisAsync.SetAllAsync(keys.ToSafeDictionary(x => x)); + + var ret = await NativeAsync.ScanAsync(0); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(ret.AsStrings(), Is.EquivalentTo(keys)); + } + + [Test] + public async Task Can_scan_100_collection_over_cursor() + { + var allKeys = new HashSet(); + await RedisAsync.FlushAllAsync(); + var keys = 100.Times(x => "KEY" + x); + await RedisAsync.SetAllAsync(keys.ToSafeDictionary(x => x)); + + var i = 0; + var ret = new ScanResult(); + while (true) + { + ret = await NativeAsync.ScanAsync(ret.Cursor, 10); + i++; + ret.AsStrings().ForEach(x => allKeys.Add(x)); + if (ret.Cursor == 0) break; + } + + Assert.That(i, Is.GreaterThanOrEqualTo(2)); + Assert.That(allKeys.Count, Is.EqualTo(keys.Count)); + Assert.That(allKeys, Is.EquivalentTo(keys)); + } + + [Test] + public async Task Can_scan_and_search_10_collection() + { + await RedisAsync.FlushAllAsync(); + var keys = 11.Times(x => "KEY" + x); + await RedisAsync.SetAllAsync(keys.ToSafeDictionary(x => x)); + + var ret = await NativeAsync.ScanAsync(0, 11, match: "KEY1*"); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(ret.AsStrings(), Is.EquivalentTo(new[] { "KEY1", "KEY10" })); + } + + [Test] + public async Task Can_SScan_10_sets() + { + await RedisAsync.FlushAllAsync(); + var items = 10.Times(x => "item" + x); + await items.ForEachAsync(async x => await RedisAsync.AddItemToSetAsync("scanset", x)); + + var ret = await NativeAsync.SScanAsync("scanset", 0); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(ret.AsStrings(), Is.EquivalentTo(items)); + } + + [Test] + public async Task Can_ZScan_10_sortedsets() + { + await RedisAsync.FlushAllAsync(); + var items = 10.Times(x => "item" + x); + var i = 0; + await items.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync("scanzset", x, i++)); + + var ret = await NativeAsync.ZScanAsync("scanzset", 0); + var itemsWithScore = ret.AsItemsWithScores(); + + Assert.That(itemsWithScore.Keys, Is.EqualTo(items)); + Assert.That(itemsWithScore.Values, Is.EqualTo(10.Times(x => (double)x))); + } + + [Test] + public async Task Can_HScan_10_hashes() + { + await RedisAsync.FlushAllAsync(); + var values = 10.Times(x => "VALUE" + x); + await RedisAsync.SetRangeInHashAsync("scanhash", values.ToSafeDictionary(x => x.Replace("VALUE", "KEY"))); + + var ret = await NativeAsync.HScanAsync("scanhash", 0); + + var keyValues = ret.AsKeyValues(); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(keyValues.Keys, Is.EquivalentTo(values.ConvertAll(x => x.Replace("VALUE", "KEY")))); + Assert.That(keyValues.Values, Is.EquivalentTo(values)); + } + + [Test] + public async Task Does_lazy_scan_all_keys() + { + await RedisAsync.FlushAllAsync(); + var keys = 100.Times(x => "KEY" + x); + await RedisAsync.SetAllAsync(keys.ToSafeDictionary(x => x)); + + var scanAllKeys = RedisAsync.ScanAllKeysAsync(pageSize: 10); + var tenKeys = await scanAllKeys.TakeAsync(10).ToListAsync(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(await scanAllKeys.CountAsync(), Is.EqualTo(100)); + } + + [Test] + public async Task Does_lazy_scan_all_set_items() + { + await RedisAsync.FlushAllAsync(); + var items = 100.Times(x => "item" + x); + await items.ForEachAsync(async x => await RedisAsync.AddItemToSetAsync("scanset", x)); + + var scanAllItems = RedisAsync.ScanAllSetItemsAsync("scanset", pageSize: 
10); + var tenKeys = await scanAllItems.TakeAsync(10).ToListAsync(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(await scanAllItems.CountAsync(), Is.EqualTo(100)); + } + + [Test] + public async Task Does_lazy_scan_all_sortedset_items() + { + await RedisAsync.FlushAllAsync(); + var items = 100.Times(x => "item" + x); + var i = 0; + await items.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync("scanzset", x, i++)); + + var scanAllItems = RedisAsync.ScanAllSortedSetItemsAsync("scanzset", pageSize: 10); + var tenKeys = await scanAllItems.TakeAsync(10).ToListAsync(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(await scanAllItems.CountAsync(), Is.EqualTo(100)); + + var map = await scanAllItems.ToDictionaryAsync(x => x.Key, x => x.Value); + Assert.That(map.Keys, Is.EquivalentTo(items)); + } + + [Test] + public async Task Does_lazy_scan_all_hash_items() + { + await RedisAsync.FlushAllAsync(); + var values = 100.Times(x => "VALUE" + x); + await RedisAsync.SetRangeInHashAsync("scanhash", values.ToSafeDictionary(x => x.Replace("VALUE", "KEY"))); + + var scanAllItems = RedisAsync.ScanAllHashEntriesAsync("scanhash", pageSize: 10); + var tenKeys = await scanAllItems.TakeAsync(10).ToListAsync(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(await scanAllItems.CountAsync(), Is.EqualTo(100)); + + var map = await scanAllItems.ToDictionaryAsync(x => x.Key, x => x.Value); + Assert.That(map.Values, Is.EquivalentTo(values)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisScanTests.cs b/tests/ServiceStack.Redis.Tests/RedisScanTests.cs new file mode 100644 index 00000000..93b30369 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisScanTests.cs @@ -0,0 +1,172 @@ +using System.Collections.Generic; +using System.Linq; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisScanTests + : RedisClientTestsBase + { + [Test] + public void Can_scan_10_collection() + { + Redis.FlushAll(); + var keys = 10.Times(x => "KEY" + x); + Redis.SetAll(keys.ToSafeDictionary(x => x)); + + var ret = Redis.Scan(0); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(ret.AsStrings(), Is.EquivalentTo(keys)); + } + + [Test] + public void Can_scan_100_collection_over_cursor() + { + var allKeys = new HashSet(); + Redis.FlushAll(); + var keys = 100.Times(x => "KEY" + x); + Redis.SetAll(keys.ToSafeDictionary(x => x)); + + var i = 0; + var ret = new ScanResult(); + while (true) + { + ret = Redis.Scan(ret.Cursor, 10); + i++; + ret.AsStrings().ForEach(x => allKeys.Add(x)); + if (ret.Cursor == 0) break; + } + + Assert.That(i, Is.GreaterThanOrEqualTo(2)); + Assert.That(allKeys.Count, Is.EqualTo(keys.Count)); + Assert.That(allKeys, Is.EquivalentTo(keys)); + } + + [Test] + public void Can_scan_and_search_10_collection() + { + Redis.FlushAll(); + var keys = 11.Times(x => "KEY" + x); + Redis.SetAll(keys.ToSafeDictionary(x => x)); + + var ret = Redis.Scan(0, 11, match: "KEY1*"); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(ret.AsStrings(), Is.EquivalentTo(new[] { "KEY1", "KEY10" })); + } + + [Test] + public void Can_SScan_10_sets() + { + Redis.FlushAll(); + var items = 10.Times(x => "item" + x); + items.ForEach(x => Redis.AddItemToSet("scanset", x)); + + var ret = Redis.SScan("scanset", 0); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(ret.AsStrings(), Is.EquivalentTo(items)); + } + + [Test] + 
public void Can_ZScan_10_sortedsets() + { + Redis.FlushAll(); + var items = 10.Times(x => "item" + x); + var i = 0; + items.ForEach(x => Redis.AddItemToSortedSet("scanzset", x, i++)); + + var ret = Redis.ZScan("scanzset", 0); + var itemsWithScore = ret.AsItemsWithScores(); + + Assert.That(itemsWithScore.Keys, Is.EqualTo(items)); + Assert.That(itemsWithScore.Values, Is.EqualTo(10.Times(x => (double)x))); + } + + [Test] + public void Can_HScan_10_hashes() + { + Redis.FlushAll(); + var values = 10.Times(x => "VALUE" + x); + Redis.SetRangeInHash("scanhash", values.ToSafeDictionary(x => x.Replace("VALUE", "KEY"))); + + var ret = Redis.HScan("scanhash", 0); + + var keyValues = ret.AsKeyValues(); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(keyValues.Keys, Is.EquivalentTo(values.ConvertAll(x => x.Replace("VALUE", "KEY")))); + Assert.That(keyValues.Values, Is.EquivalentTo(values)); + } + + [Test] + public void Does_lazy_scan_all_keys() + { + Redis.FlushAll(); + var keys = 100.Times(x => "KEY" + x); + Redis.SetAll(keys.ToSafeDictionary(x => x)); + + var scanAllKeys = Redis.ScanAllKeys(pageSize: 10); + var tenKeys = scanAllKeys.Take(10).ToList(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(scanAllKeys.Count(), Is.EqualTo(100)); + } + + [Test] + public void Does_lazy_scan_all_set_items() + { + Redis.FlushAll(); + var items = 100.Times(x => "item" + x); + items.ForEach(x => Redis.AddItemToSet("scanset", x)); + + var scanAllItems = Redis.ScanAllSetItems("scanset", pageSize: 10); + var tenKeys = scanAllItems.Take(10).ToList(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(scanAllItems.Count(), Is.EqualTo(100)); + } + + [Test] + public void Does_lazy_scan_all_sortedset_items() + { + Redis.FlushAll(); + var items = 100.Times(x => "item" + x); + var i = 0; + items.ForEach(x => Redis.AddItemToSortedSet("scanzset", x, i++)); + + var scanAllItems = Redis.ScanAllSortedSetItems("scanzset", pageSize: 10); + var tenKeys = scanAllItems.Take(10).ToList(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(scanAllItems.Count(), Is.EqualTo(100)); + + var map = scanAllItems.ToDictionary(x => x.Key, x => x.Value); + Assert.That(map.Keys, Is.EquivalentTo(items)); + } + + [Test] + public void Does_lazy_scan_all_hash_items() + { + Redis.FlushAll(); + var values = 100.Times(x => "VALUE" + x); + Redis.SetRangeInHash("scanhash", values.ToSafeDictionary(x => x.Replace("VALUE", "KEY"))); + + var scanAllItems = Redis.ScanAllHashEntries("scanhash", pageSize: 10); + var tenKeys = scanAllItems.Take(10).ToList(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(scanAllItems.Count(), Is.EqualTo(100)); + + var map = scanAllItems.ToDictionary(x => x.Key, x => x.Value); + Assert.That(map.Values, Is.EquivalentTo(values)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisStatsTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisStatsTests.Async.cs new file mode 100644 index 00000000..2fd145ef --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisStatsTests.Async.cs @@ -0,0 +1,43 @@ +using System.Collections.Generic; +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisStatsTestsAsync + : RedisClientTestsBaseAsync + { + [OneTimeSetUp] + public void OneTimeSetUp() + { + RedisConfig.AssumeServerVersion = 2821; + } + + [Test] + [Ignore("too long")] + public async Task Batch_and_Pipeline_requests_only_counts_as_1_request() + { + var 
reqCount = RedisNativeClient.RequestsPerHour; + + var map = new Dictionary(); + 10.Times(i => map["key" + i] = "value" + i); + + await RedisAsync.SetValuesAsync(map); + + Assert.That(RedisNativeClient.RequestsPerHour, Is.EqualTo(reqCount + 1)); + + var keyTypes = new Dictionary(); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + map.Keys.Each(key => + pipeline.QueueCommand(r => r.TypeAsync(key), x => keyTypes[key] = x)); + + await pipeline.FlushAsync(); + } + + Assert.That(RedisNativeClient.RequestsPerHour, Is.EqualTo(reqCount + 2)); + Assert.That(keyTypes.Count, Is.EqualTo(map.Count)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisStatsTests.cs b/tests/ServiceStack.Redis.Tests/RedisStatsTests.cs new file mode 100644 index 00000000..04dc8853 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisStatsTests.cs @@ -0,0 +1,42 @@ +using System.Collections.Generic; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisStatsTests + : RedisClientTestsBase + { + [OneTimeSetUp] + public void OneTimeSetUp() + { + RedisConfig.AssumeServerVersion = 2821; + } + + [Test] + [Ignore("too long")] + public void Batch_and_Pipeline_requests_only_counts_as_1_request() + { + var reqCount = RedisNativeClient.RequestsPerHour; + + var map = new Dictionary(); + 10.Times(i => map["key" + i] = "value" + i); + + Redis.SetValues(map); + + Assert.That(RedisNativeClient.RequestsPerHour, Is.EqualTo(reqCount + 1)); + + var keyTypes = new Dictionary(); + using (var pipeline = Redis.CreatePipeline()) + { + map.Keys.Each(key => + pipeline.QueueCommand(r => r.Type(key), x => keyTypes[key] = x)); + + pipeline.Flush(); + } + + Assert.That(RedisNativeClient.RequestsPerHour, Is.EqualTo(reqCount + 2)); + Assert.That(keyTypes.Count, Is.EqualTo(map.Count)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisTemplateTests.cs b/tests/ServiceStack.Redis.Tests/RedisTemplateTests.cs new file mode 100644 index 00000000..03876a1d --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisTemplateTests.cs @@ -0,0 +1,47 @@ +using NUnit.Framework; +using ServiceStack.Script; +using ServiceStack.Templates; + +namespace ServiceStack.Redis.Tests +{ + class RedisTemplateTests + { + [Test] + public void Does_build_connection_string() + { + var context = new ScriptContext + { + ScriptMethods = { new RedisScripts() } + }; + context.Container.AddSingleton(() => new RedisManagerPool()); + context.Init(); + + Assert.That(context.EvaluateScript("{{ redisToConnectionString: host:7000?db=1 }}"), + Is.EqualTo("host:7000?db=1")); + + Assert.That(context.EvaluateScript("{{ { host: 'host' } | redisToConnectionString }}"), + Is.EqualTo("host:6379?db=0")); + + Assert.That(context.EvaluateScript("{{ { port: 7000 } | redisToConnectionString }}"), + Is.EqualTo("localhost:7000?db=0")); + + Assert.That(context.EvaluateScript("{{ { db: 1 } | redisToConnectionString }}"), + Is.EqualTo("localhost:6379?db=1")); + + Assert.That(context.EvaluateScript("{{ { host: 'host', port: 7000, db: 1 } | redisToConnectionString }}"), + Is.EqualTo("host:7000?db=1")); + + Assert.That(context.EvaluateScript("{{ { host: 'host', port: 7000, db: 1, password:'secret' } | redisToConnectionString | raw }}"), + Is.EqualTo("host:7000?db=1&password=secret")); + + Assert.That(context.EvaluateScript("{{ redisConnectionString }}"), + Is.EqualTo("localhost:6379?db=0")); + + Assert.That(context.EvaluateScript("{{ { db: 1 } | redisChangeConnection }}"), + 
Is.EqualTo("localhost:6379?db=1")); + + Assert.That(context.EvaluateScript("{{ redisConnectionString }}"), + Is.EqualTo("localhost:6379?db=1")); + } + } +} diff --git a/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.Async.cs new file mode 100644 index 00000000..6ecd123b --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.Async.cs @@ -0,0 +1,68 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisTransactionCommonTestsAsync + : RedisClientTestsBaseAsync + { + private const string Prefix = "tran"; + + public override void OnAfterEachTest() + { + CleanMask = Prefix + "*"; + base.OnAfterEachTest(); + } + + [Test] + public async Task Can_Set_and_Expire_key_in_atomic_transaction() + { + var oneSec = TimeSpan.FromSeconds(1); + + Assert.That(await RedisAsync.GetValueAsync(Prefix + "key"), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) //Calls 'MULTI' + { + trans.QueueCommand(r => r.SetValueAsync(Prefix + "key", "a")); //Queues 'SET key a' + trans.QueueCommand(r => r.ExpireEntryInAsync(Prefix + "key", oneSec)); //Queues 'EXPIRE key 1' + + await trans.CommitAsync(); //Calls 'EXEC' + + } //Calls 'DISCARD' if 'EXEC' wasn't called + + Assert.That(await RedisAsync.GetValueAsync(Prefix + "key"), Is.EqualTo("a")); + await Task.Delay(TimeSpan.FromSeconds(2)); + Assert.That(await RedisAsync.GetValueAsync(Prefix + "key"), Is.Null); + } + + [Test] + public async Task Can_Pop_priority_message_from_SortedSet_and_Add_to_workq_in_atomic_transaction() + { + var messages = new List { "message4", "message3", "message2" }; + + await RedisAsync.AddItemToListAsync(Prefix + "workq", "message1"); + + var priority = 1; + await messages.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(Prefix + "prioritymsgs", x, priority++)); + + var highestPriorityMessage = await RedisAsync.PopItemWithHighestScoreFromSortedSetAsync(Prefix + "prioritymsgs"); + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.RemoveItemFromSortedSetAsync(Prefix + "prioritymsgs", highestPriorityMessage)); + trans.QueueCommand(r => r.AddItemToListAsync(Prefix + "workq", highestPriorityMessage)); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetAllItemsFromListAsync(Prefix + "workq"), + Is.EquivalentTo(new List { "message1", "message2" })); + Assert.That(await RedisAsync.GetAllItemsFromSortedSetAsync(Prefix + "prioritymsgs"), + Is.EquivalentTo(new List { "message3", "message4" })); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.cs b/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.cs index 3eae60cb..8b4ad350 100644 --- a/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.cs @@ -5,63 +5,63 @@ namespace ServiceStack.Redis.Tests { - [TestFixture] - public class RedisTransactionCommonTests - : RedisClientTestsBase - { + [TestFixture] + public class RedisTransactionCommonTests + : RedisClientTestsBase + { private const string Prefix = "tran"; - public override void TearDown() + public override void OnAfterEachTest() { CleanMask = Prefix + "*"; - base.TearDown(); + base.OnAfterEachTest(); } - [Test] - public void 
Can_Set_and_Expire_key_in_atomic_transaction() - { - var oneSec = TimeSpan.FromSeconds(1); + [Test] + public void Can_Set_and_Expire_key_in_atomic_transaction() + { + var oneSec = TimeSpan.FromSeconds(1); - Assert.That(Redis.GetValue(Prefix + "key"), Is.Null); - using (var trans = Redis.CreateTransaction()) //Calls 'MULTI' - { - trans.QueueCommand(r => r.SetEntry(Prefix + "key", "a")); //Queues 'SET key a' + Assert.That(Redis.GetValue(Prefix + "key"), Is.Null); + using (var trans = Redis.CreateTransaction()) //Calls 'MULTI' + { + trans.QueueCommand(r => r.SetValue(Prefix + "key", "a")); //Queues 'SET key a' trans.QueueCommand(r => r.ExpireEntryIn(Prefix + "key", oneSec)); //Queues 'EXPIRE key 1' - trans.Commit(); //Calls 'EXEC' + trans.Commit(); //Calls 'EXEC' - } //Calls 'DISCARD' if 'EXEC' wasn't called + } //Calls 'DISCARD' if 'EXEC' wasn't called Assert.That(Redis.GetValue(Prefix + "key"), Is.EqualTo("a")); - Thread.Sleep(TimeSpan.FromSeconds(2)); + Thread.Sleep(TimeSpan.FromSeconds(2)); Assert.That(Redis.GetValue(Prefix + "key"), Is.Null); - } + } - [Test] - public void Can_Pop_priority_message_from_SortedSet_and_Add_to_workq_in_atomic_transaction() - { - var messages = new List { "message4", "message3", "message2" }; + [Test] + public void Can_Pop_priority_message_from_SortedSet_and_Add_to_workq_in_atomic_transaction() + { + var messages = new List { "message4", "message3", "message2" }; Redis.AddItemToList(Prefix + "workq", "message1"); - - var priority = 1; + + var priority = 1; messages.ForEach(x => Redis.AddItemToSortedSet(Prefix + "prioritymsgs", x, priority++)); var highestPriorityMessage = Redis.PopItemWithHighestScoreFromSortedSet(Prefix + "prioritymsgs"); - using (var trans = Redis.CreateTransaction()) - { + using (var trans = Redis.CreateTransaction()) + { trans.QueueCommand(r => r.RemoveItemFromSortedSet(Prefix + "prioritymsgs", highestPriorityMessage)); - trans.QueueCommand(r => r.AddItemToList(Prefix + "workq", highestPriorityMessage)); + trans.QueueCommand(r => r.AddItemToList(Prefix + "workq", highestPriorityMessage)); - trans.Commit(); - } + trans.Commit(); + } - Assert.That(Redis.GetAllItemsFromList(Prefix + "workq"), - Is.EquivalentTo(new List { "message1", "message2" })); - Assert.That(Redis.GetAllItemsFromSortedSet(Prefix + "prioritymsgs"), - Is.EquivalentTo(new List { "message3", "message4" })); - } + Assert.That(Redis.GetAllItemsFromList(Prefix + "workq"), + Is.EquivalentTo(new List { "message1", "message2" })); + Assert.That(Redis.GetAllItemsFromSortedSet(Prefix + "prioritymsgs"), + Is.EquivalentTo(new List { "message3", "message4" })); + } - } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisTransactionTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisTransactionTests.Async.cs new file mode 100644 index 00000000..3d11b6b4 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisTransactionTests.Async.cs @@ -0,0 +1,417 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisTransactionTestsAsync + : RedisClientTestsBaseAsync + { + private const string Key = "rdtmultitest"; + private const string ListKey = "rdtmultitest-list"; + private const string SetKey = "rdtmultitest-set"; + private const string SortedSetKey = "rdtmultitest-sortedset"; + private const string HashKey = "rdthashtest"; + + public override void OnAfterEachTest() + { + CleanMask = Key + "*"; + 
base.OnAfterEachTest(); + } + + [Test] + public async Task Can_call_single_operation_in_transaction() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + var map = new Dictionary(); + trans.QueueCommand(r => r.GetAsync(Key).AsValueTask(), y => map[Key] = y); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + } + + [Test] + public async Task No_commit_of_atomic_transactions_discards_all_commands() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + } + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } + + [Test] + public async Task Watch_aborts_transaction() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + const string value1 = "value1"; + try + { + await RedisAsync.WatchAsync(new[] { Key }); + await RedisAsync.SetAsync(Key, value1); + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => r.SetAsync(Key, value1).AsValueTask()); + var success = await trans.CommitAsync(); + Assert.False(success); + Assert.AreEqual(value1, await RedisAsync.GetAsync(Key)); + } + catch (NotSupportedException) + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Exception_in_atomic_transactions_discards_all_commands() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + try + { + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + throw new NotSupportedException(); + } + catch (NotSupportedException) + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Can_call_single_operation_3_Times_in_transaction() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("3")); + } + + [Test] + public async Task Can_call_single_operation_with_callback_3_Times_in_transaction() + { + var results = new List(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + trans.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + trans.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("3")); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public async Task Supports_different_operation_types_in_same_transaction() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, 
"listitem1")); + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem2")); + trans.QueueCommand(r => r.AddItemToSetAsync(SetKey, "setitem")); + trans.QueueCommand(r => r.SetContainsItemAsync(SetKey, "setitem"), b => containsItem = b); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem1")); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem2")); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem3")); + trans.QueueCommand(r => r.GetListCountAsync(ListKey), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSetCountAsync(SetKey), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSortedSetCountAsync(SortedSetKey), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + + await trans.CommitAsync(); + } + + Assert.That(containsItem, Is.True); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + Assert.That(await RedisAsync.GetAllItemsFromListAsync(ListKey), Is.EquivalentTo(new List { "listitem1", "listitem2" })); + Assert.That(await RedisAsync.GetAllItemsFromSetAsync(SetKey), Is.EquivalentTo(new List { "setitem" })); + Assert.That(await RedisAsync.GetAllItemsFromSortedSetAsync(SortedSetKey), Is.EquivalentTo(new List { "sortedsetitem1", "sortedsetitem2", "sortedsetitem3" })); + } + + [Test] + public async Task Can_call_multi_string_operations_in_transaction() + { + string item1 = null; + string item4 = null; + + var results = new List(); + Assert.That(await RedisAsync.GetListCountAsync(ListKey), Is.EqualTo(0)); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem1")); + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem2")); + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem3")); + trans.QueueCommand(r => r.GetAllItemsFromListAsync(ListKey), x => results = x); + trans.QueueCommand(r => r.GetItemFromListAsync(ListKey, 0), x => item1 = x); + trans.QueueCommand(r => r.GetItemFromListAsync(ListKey, 4), x => item4 = x); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetListCountAsync(ListKey), Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { "listitem1", "listitem2", "listitem3" })); + Assert.That(item1, Is.EqualTo("listitem1")); + Assert.That(item4, Is.Null); + } + [Test] + public async Task Can_call_multiple_setexs_in_transaction() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + var keys = new[] { "key1", "key2", "key3" }; + var values = new[] { "1", "2", "3" }; + await using var trans = await RedisAsync.CreateTransactionAsync(); + + for (int i = 0; i < 3; ++i) + { + int index0 = i; + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync(keys[index0], 100, GetBytes(values[index0]))); + } + + await trans.CommitAsync(); + await trans.ReplayAsync(); + + + for (int i = 0; i < 3; ++i) + Assert.AreEqual(await RedisAsync.GetValueAsync(keys[i]), values[i]); + } + [Test] + // Operations that are not supported in older versions will look at server info to determine what to do. 
+ // If server info is fetched each time, then it will interfere with transaction + public async Task Can_call_operation_not_supported_on_older_servers_in_transaction() + { + var temp = new byte[1]; + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync(Key, 5, temp)); + await trans.CommitAsync(); + } + + + [Test] + public async Task Transaction_can_be_replayed() + { + string KeySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + trans.QueueCommand(r => r.IncrementValueAsync(KeySquared)); + await trans.CommitAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + await NativeAsync.DelAsync(Key); + await NativeAsync.DelAsync(KeySquared); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + + await trans.ReplayAsync(); + await trans.DisposeAsync(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + } + + [Test] + public async Task Transaction_can_issue_watch() + { + await NativeAsync.DelAsync(Key); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + + string KeySquared = Key + Key; + await NativeAsync.DelAsync(KeySquared); + + await RedisAsync.WatchAsync(new[] { Key, KeySquared }); + await RedisAsync.SetAsync(Key, 7); + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.SetAsync(Key, 1).AsValueTask()); + trans.QueueCommand(r => r.SetAsync(KeySquared, 2).AsValueTask()); + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("7")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + } + + [Test] + public async Task Can_set_Expiry_on_key_in_transaction() + { + var expiresIn = TimeSpan.FromMinutes(15); + + const string key = "No TTL-Transaction"; + var keyWithTtl = "{0}s TTL-Transaction".Fmt(expiresIn.TotalSeconds); + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddAsync(key, "Foo").AsValueTask()); + trans.QueueCommand(r => r.AddAsync(keyWithTtl, "Bar", expiresIn).AsValueTask()); + + if (!await trans.CommitAsync()) + throw new Exception("Transaction Failed"); + } + + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo("Foo")); + Assert.That(await RedisAsync.GetAsync(keyWithTtl), Is.EqualTo("Bar")); + + Assert.That(await RedisAsync.GetTimeToLiveAsync(key), Is.EqualTo(TimeSpan.MaxValue)); + Assert.That((await RedisAsync.GetTimeToLiveAsync(keyWithTtl)).Value.TotalSeconds, Is.GreaterThan(1)); + } + + [Test] + public async Task Does_not_set_Expiry_on_existing_key_in_transaction() + { + var expiresIn = TimeSpan.FromMinutes(15); + + var key = "Exting TTL-Transaction"; + await RedisAsync.AddAsync(key, "Foo"); + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddAsync(key, "Bar", expiresIn).AsValueTask()); + + if (!await trans.CommitAsync()) + throw new Exception("Transaction Failed"); + } + + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo("Foo")); + Assert.That(await 
RedisAsync.GetTimeToLiveAsync(key), Is.EqualTo(TimeSpan.MaxValue)); + } + + [Test] + public async Task Can_call_GetAllEntriesFromHash_in_transaction() + { + var stringMap = new Dictionary { + {"one","a"}, {"two","b"}, {"three","c"}, {"four","d"} + }; + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashKey, x.Key, x.Value); + } + + Dictionary results = null; + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.GetAllEntriesFromHashAsync(HashKey), x => results = x); + + await trans.CommitAsync(); + } + + Assert.That(results, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_call_Type_in_transaction() + { + await RedisAsync.SetValueAsync("string", "STRING"); + await RedisAsync.AddItemToListAsync("list", "LIST"); + await RedisAsync.AddItemToSetAsync("set", "SET"); + await RedisAsync.AddItemToSortedSetAsync("zset", "ZSET", 1); + + var keys = new[] { "string", "list", "set", "zset" }; + + var results = new Dictionary(); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + foreach (var key in keys) + { + trans.QueueCommand(r => r.TypeAsync(key), x => results[key] = x); + } + + await trans.CommitAsync(); + } + + results.PrintDump(); + + Assert.That(results, Is.EquivalentTo(new Dictionary + { + {"string", "string" }, + {"list", "list" }, + {"set", "set" }, + {"zset", "zset" }, + })); + } + + [Test] + public async Task Can_call_HashSet_commands_in_transaction() + { + await RedisAsync.AddItemToSetAsync("set", "ITEM 1"); + await RedisAsync.AddItemToSetAsync("set", "ITEM 2"); + HashSet result = null; + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.GetAllItemsFromSetAsync("set"), values => result = values); + + await trans.CommitAsync(); + } + + Assert.That(result, Is.EquivalentTo(new[] { "ITEM 1", "ITEM 2" })); + } + + [Test] + public async Task Can_call_LUA_Script_in_transaction() + { + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.ExecLuaAsync("return {'myval', 'myotherval'}", new string[0])); + + await trans.CommitAsync(); + } + + RedisText result = null; + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.ExecLuaAsync("return {'myval', 'myotherval'}", new string[0]), s => result = s); + + await trans.CommitAsync(); + } + + Assert.That(result.Children[0].Text, Is.EqualTo("myval")); + Assert.That(result.Children[1].Text, Is.EqualTo("myotherval")); + } + + [Test] + public async Task Can_call_SetValueIfNotExists_in_transaction() + { + bool f = false; + bool s = false; + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(c => c.SetValueIfNotExistsAsync("foo", "blah"), r => f = r); + trans.QueueCommand(c => c.SetValueIfNotExistsAsync("bar", "blah"), r => s = r); + await trans.CommitAsync(); + } + + Assert.That(f); + Assert.That(s); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisTransactionTests.cs b/tests/ServiceStack.Redis.Tests/RedisTransactionTests.cs index 77b9271a..b3460118 100644 --- a/tests/ServiceStack.Redis.Tests/RedisTransactionTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisTransactionTests.cs @@ -1,187 +1,188 @@ using System; using System.Collections.Generic; using NUnit.Framework; +using ServiceStack.Text; namespace ServiceStack.Redis.Tests { - [TestFixture] - public class RedisTransactionTests - : RedisClientTestsBase - { - private 
const string Key = "rdtmultitest"; + [TestFixture] + public class RedisTransactionTests + : RedisClientTestsBase + { + private const string Key = "rdtmultitest"; private const string ListKey = "rdtmultitest-list"; private const string SetKey = "rdtmultitest-set"; private const string SortedSetKey = "rdtmultitest-sortedset"; + private const string HashKey = "rdthashtest"; - public override void TearDown() + public override void OnAfterEachTest() { CleanMask = Key + "*"; - base.TearDown(); + base.OnAfterEachTest(); } - [Test] - public void Can_call_single_operation_in_transaction() - { - Assert.That(Redis.GetValue(Key), Is.Null); - using (var trans = Redis.CreateTransaction()) - { - trans.QueueCommand(r => r.IncrementValue(Key)); - var map = new Dictionary(); - trans.QueueCommand(r => r.Get(Key), y => map[Key] = y); - - trans.Commit(); - } - - Assert.That(Redis.GetValue(Key), Is.EqualTo("1")); - } - - [Test] - public void No_commit_of_atomic_transactions_discards_all_commands() - { - Assert.That(Redis.GetValue(Key), Is.Null); - using (var trans = Redis.CreateTransaction()) - { - trans.QueueCommand(r => r.IncrementValue(Key)); - } - Assert.That(Redis.GetValue(Key), Is.Null); - } + [Test] + public void Can_call_single_operation_in_transaction() + { + Assert.That(Redis.GetValue(Key), Is.Null); + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.IncrementValue(Key)); + var map = new Dictionary(); + trans.QueueCommand(r => r.Get(Key), y => map[Key] = y); + + trans.Commit(); + } + + Assert.That(Redis.GetValue(Key), Is.EqualTo("1")); + } + + [Test] + public void No_commit_of_atomic_transactions_discards_all_commands() + { + Assert.That(Redis.GetValue(Key), Is.Null); + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.IncrementValue(Key)); + } + Assert.That(Redis.GetValue(Key), Is.Null); + } [Test] public void Watch_aborts_transaction() { Assert.That(Redis.GetValue(Key), Is.Null); const string value1 = "value1"; - const string value2 = "value2"; try { Redis.Watch(Key); Redis.Set(Key, value1); using (var trans = Redis.CreateTransaction()) { - trans.QueueCommand(r => r.Set(Key,value1)); + trans.QueueCommand(r => r.Set(Key, value1)); var success = trans.Commit(); Assert.False(success); Assert.AreEqual(value1, Redis.Get(Key)); } } - catch (NotSupportedException ignore) + catch (NotSupportedException) + { + Assert.That(Redis.GetValue(Key), Is.Null); + } + } + + [Test] + public void Exception_in_atomic_transactions_discards_all_commands() + { + Assert.That(Redis.GetValue(Key), Is.Null); + try + { + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.IncrementValue(Key)); + throw new NotSupportedException(); + } + } + catch (NotSupportedException) { Assert.That(Redis.GetValue(Key), Is.Null); } } - [Test] - public void Exception_in_atomic_transactions_discards_all_commands() - { - Assert.That(Redis.GetValue(Key), Is.Null); - try - { - using (var trans = Redis.CreateTransaction()) - { - trans.QueueCommand(r => r.IncrementValue(Key)); - throw new NotSupportedException(); - } - } - catch (NotSupportedException ignore) - { - Assert.That(Redis.GetValue(Key), Is.Null); - } - } - - [Test] - public void Can_call_single_operation_3_Times_in_transaction() - { - Assert.That(Redis.GetValue(Key), Is.Null); - using (var trans = Redis.CreateTransaction()) - { - trans.QueueCommand(r => r.IncrementValue(Key)); - trans.QueueCommand(r => r.IncrementValue(Key)); - trans.QueueCommand(r => r.IncrementValue(Key)); - - trans.Commit(); - } - - 
Assert.That(Redis.GetValue(Key), Is.EqualTo("3")); - } - - [Test] - public void Can_call_single_operation_with_callback_3_Times_in_transaction() - { - var results = new List(); - Assert.That(Redis.GetValue(Key), Is.Null); - using (var trans = Redis.CreateTransaction()) - { - trans.QueueCommand(r => r.IncrementValue(Key), results.Add); - trans.QueueCommand(r => r.IncrementValue(Key), results.Add); - trans.QueueCommand(r => r.IncrementValue(Key), results.Add); - - trans.Commit(); - } - - Assert.That(Redis.GetValue(Key), Is.EqualTo("3")); - Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); - } - - [Test] - public void Supports_different_operation_types_in_same_transaction() - { - var incrementResults = new List(); - var collectionCounts = new List(); - var containsItem = false; - - Assert.That(Redis.GetValue(Key), Is.Null); - using (var trans = Redis.CreateTransaction()) - { - trans.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); - trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem1")); - trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem2")); - trans.QueueCommand(r => r.AddItemToSet(SetKey, "setitem")); - trans.QueueCommand(r => r.SetContainsItem(SetKey, "setitem"), b => containsItem = b); - trans.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem1")); - trans.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem2")); - trans.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem3")); - trans.QueueCommand(r => r.GetListCount(ListKey), intResult => collectionCounts.Add(intResult)); - trans.QueueCommand(r => r.GetSetCount(SetKey), intResult => collectionCounts.Add(intResult)); - trans.QueueCommand(r => r.GetSortedSetCount(SortedSetKey), intResult => collectionCounts.Add(intResult)); - trans.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); - - trans.Commit(); - } - - Assert.That(containsItem, Is.True); - Assert.That(Redis.GetValue(Key), Is.EqualTo("2")); - Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); - Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); - Assert.That(Redis.GetAllItemsFromList(ListKey), Is.EquivalentTo(new List { "listitem1", "listitem2" })); - Assert.That(Redis.GetAllItemsFromSet(SetKey), Is.EquivalentTo(new List { "setitem" })); - Assert.That(Redis.GetAllItemsFromSortedSet(SortedSetKey), Is.EquivalentTo(new List { "sortedsetitem1", "sortedsetitem2", "sortedsetitem3" })); - } - - [Test] - public void Can_call_multi_string_operations_in_transaction() - { - string item1 = null; - string item4 = null; - - var results = new List(); - Assert.That(Redis.GetListCount(ListKey), Is.EqualTo(0)); - using (var trans = Redis.CreateTransaction()) - { - trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem1")); - trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem2")); - trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem3")); - trans.QueueCommand(r => r.GetAllItemsFromList(ListKey), x => results = x); - trans.QueueCommand(r => r.GetItemFromList(ListKey, 0), x => item1 = x); - trans.QueueCommand(r => r.GetItemFromList(ListKey, 4), x => item4 = x); - - trans.Commit(); - } - - Assert.That(Redis.GetListCount(ListKey), Is.EqualTo(3)); - Assert.That(results, Is.EquivalentTo(new List { "listitem1", "listitem2", "listitem3" })); - Assert.That(item1, Is.EqualTo("listitem1")); - Assert.That(item4, Is.Null); - } + [Test] + public void Can_call_single_operation_3_Times_in_transaction() + { + 
Assert.That(Redis.GetValue(Key), Is.Null); + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.IncrementValue(Key)); + trans.QueueCommand(r => r.IncrementValue(Key)); + trans.QueueCommand(r => r.IncrementValue(Key)); + + trans.Commit(); + } + + Assert.That(Redis.GetValue(Key), Is.EqualTo("3")); + } + + [Test] + public void Can_call_single_operation_with_callback_3_Times_in_transaction() + { + var results = new List(); + Assert.That(Redis.GetValue(Key), Is.Null); + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.IncrementValue(Key), results.Add); + trans.QueueCommand(r => r.IncrementValue(Key), results.Add); + trans.QueueCommand(r => r.IncrementValue(Key), results.Add); + + trans.Commit(); + } + + Assert.That(Redis.GetValue(Key), Is.EqualTo("3")); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public void Supports_different_operation_types_in_same_transaction() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + Assert.That(Redis.GetValue(Key), Is.Null); + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); + trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem1")); + trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem2")); + trans.QueueCommand(r => r.AddItemToSet(SetKey, "setitem")); + trans.QueueCommand(r => r.SetContainsItem(SetKey, "setitem"), b => containsItem = b); + trans.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem1")); + trans.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem2")); + trans.QueueCommand(r => r.AddItemToSortedSet(SortedSetKey, "sortedsetitem3")); + trans.QueueCommand(r => r.GetListCount(ListKey), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSetCount(SetKey), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSortedSetCount(SortedSetKey), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.IncrementValue(Key), intResult => incrementResults.Add(intResult)); + + trans.Commit(); + } + + Assert.That(containsItem, Is.True); + Assert.That(Redis.GetValue(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + Assert.That(Redis.GetAllItemsFromList(ListKey), Is.EquivalentTo(new List { "listitem1", "listitem2" })); + Assert.That(Redis.GetAllItemsFromSet(SetKey), Is.EquivalentTo(new List { "setitem" })); + Assert.That(Redis.GetAllItemsFromSortedSet(SortedSetKey), Is.EquivalentTo(new List { "sortedsetitem1", "sortedsetitem2", "sortedsetitem3" })); + } + + [Test] + public void Can_call_multi_string_operations_in_transaction() + { + string item1 = null; + string item4 = null; + + var results = new List(); + Assert.That(Redis.GetListCount(ListKey), Is.EqualTo(0)); + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem1")); + trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem2")); + trans.QueueCommand(r => r.AddItemToList(ListKey, "listitem3")); + trans.QueueCommand(r => r.GetAllItemsFromList(ListKey), x => results = x); + trans.QueueCommand(r => r.GetItemFromList(ListKey, 0), x => item1 = x); + trans.QueueCommand(r => r.GetItemFromList(ListKey, 4), x => item4 = x); + + trans.Commit(); + } + + Assert.That(Redis.GetListCount(ListKey), 
Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { "listitem1", "listitem2", "listitem3" })); + Assert.That(item1, Is.EqualTo("listitem1")); + Assert.That(item4, Is.Null); + } [Test] public void Can_call_multiple_setexs_in_transaction() { @@ -213,12 +214,12 @@ public void Can_call_operation_not_supported_on_older_servers_in_transaction() var temp = new byte[1]; using (var trans = Redis.CreateTransaction()) { - trans.QueueCommand(r => ((RedisNativeClient)r).SetEx(Key,5,temp)); + trans.QueueCommand(r => ((RedisNativeClient)r).SetEx(Key, 5, temp)); trans.Commit(); } } - + [Test] public void Transaction_can_be_replayed() { @@ -244,7 +245,8 @@ public void Transaction_can_be_replayed() Assert.That(Redis.GetValue(KeySquared), Is.EqualTo("1")); } } - [Test] + + [Test] public void Transaction_can_issue_watch() { Redis.Del(Key); @@ -252,7 +254,7 @@ public void Transaction_can_issue_watch() string KeySquared = Key + Key; Redis.Del(KeySquared); - + Redis.Watch(Key, KeySquared); Redis.Set(Key, 7); @@ -264,8 +266,158 @@ public void Transaction_can_issue_watch() } Assert.That(Redis.GetValue(Key), Is.EqualTo("7")); - Assert.That(Redis.GetValue(KeySquared), Is.Null); + Assert.That(Redis.GetValue(KeySquared), Is.Null); + } + + [Test] + public void Can_set_Expiry_on_key_in_transaction() + { + var expiresIn = TimeSpan.FromMinutes(15); + + const string key = "No TTL-Transaction"; + var keyWithTtl = "{0}s TTL-Transaction".Fmt(expiresIn.TotalSeconds); + + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.Add(key, "Foo")); + trans.QueueCommand(r => r.Add(keyWithTtl, "Bar", expiresIn)); + + if (!trans.Commit()) + throw new Exception("Transaction Failed"); + } + + Assert.That(Redis.Get(key), Is.EqualTo("Foo")); + Assert.That(Redis.Get(keyWithTtl), Is.EqualTo("Bar")); + + Assert.That(Redis.GetTimeToLive(key), Is.EqualTo(TimeSpan.MaxValue)); + Assert.That(Redis.GetTimeToLive(keyWithTtl).Value.TotalSeconds, Is.GreaterThan(1)); + } + + [Test] + public void Does_not_set_Expiry_on_existing_key_in_transaction() + { + var expiresIn = TimeSpan.FromMinutes(15); + + var key = "Exting TTL-Transaction"; + Redis.Add(key, "Foo"); + + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.Add(key, "Bar", expiresIn)); + + if (!trans.Commit()) + throw new Exception("Transaction Failed"); + } + + Assert.That(Redis.Get(key), Is.EqualTo("Foo")); + Assert.That(Redis.GetTimeToLive(key), Is.EqualTo(TimeSpan.MaxValue)); + } + + [Test] + public void Can_call_GetAllEntriesFromHash_in_transaction() + { + var stringMap = new Dictionary { + {"one","a"}, {"two","b"}, {"three","c"}, {"four","d"} + }; + stringMap.Each(x => Redis.SetEntryInHash(HashKey, x.Key, x.Value)); + + Dictionary results = null; + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.GetAllEntriesFromHash(HashKey), x => results = x); + + trans.Commit(); + } + + Assert.That(results, Is.EquivalentTo(stringMap)); + } + + [Test] + public void Can_call_Type_in_transaction() + { + Redis.SetValue("string", "STRING"); + Redis.AddItemToList("list", "LIST"); + Redis.AddItemToSet("set", "SET"); + Redis.AddItemToSortedSet("zset", "ZSET", 1); + + var keys = new[] { "string", "list", "set", "zset" }; + + var results = new Dictionary(); + using (var trans = Redis.CreateTransaction()) + { + foreach (var key in keys) + { + trans.QueueCommand(r => r.Type(key), x => results[key] = x); + } + + trans.Commit(); + } + + results.PrintDump(); + + Assert.That(results, Is.EquivalentTo(new Dictionary + { + 
{"string", "string" }, + {"list", "list" }, + {"set", "set" }, + {"zset", "zset" }, + })); + } + + [Test] + public void Can_call_HashSet_commands_in_transaction() + { + Redis.AddItemToSet("set", "ITEM 1"); + Redis.AddItemToSet("set", "ITEM 2"); + HashSet result = null; + + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.GetAllItemsFromSet("set"), values => result = values); + + trans.Commit(); + } + + Assert.That(result, Is.EquivalentTo(new[] { "ITEM 1", "ITEM 2" })); + } + + [Test] + public void Can_call_LUA_Script_in_transaction() + { + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.ExecLua("return {'myval', 'myotherval'}")); + + trans.Commit(); + } + + RedisText result = null; + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(r => r.ExecLua("return {'myval', 'myotherval'}"), s => result = s); + + trans.Commit(); + } + + Assert.That(result.Children[0].Text, Is.EqualTo("myval")); + Assert.That(result.Children[1].Text, Is.EqualTo("myotherval")); + } + + [Test] + public void Can_call_SetValueIfNotExists_in_transaction() + { + bool f = false; + bool s = false; + + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(c => c.SetValueIfNotExists("foo", "blah"), r => f = r); + trans.QueueCommand(c => c.SetValueIfNotExists("bar", "blah"), r => s = r); + trans.Commit(); + } + Assert.That(f); + Assert.That(s); } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisUtilTests.cs b/tests/ServiceStack.Redis.Tests/RedisUtilTests.cs index ce3c9130..48b02a2c 100644 --- a/tests/ServiceStack.Redis.Tests/RedisUtilTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisUtilTests.cs @@ -4,26 +4,26 @@ namespace ServiceStack.Redis.Tests { - [TestFixture] - public class RedisUtilTests - { - [Test] - public void Can_Calculate_Lexical_Score() - { - const string minScore = "AAAA"; - const string nextMinScore = "AAAB"; - const string maxScore = "ZZZZ"; + [TestFixture] + public class RedisUtilTests + { + [Test] + public void Can_Calculate_Lexical_Score() + { + const string minScore = "AAAA"; + const string nextMinScore = "AAAB"; + const string maxScore = "ZZZZ"; - Assert.That(RedisClient.GetLexicalScore(minScore), - Is.LessThan(RedisClient.GetLexicalScore(nextMinScore))); + Assert.That(RedisClient.GetLexicalScore(minScore), + Is.LessThan(RedisClient.GetLexicalScore(nextMinScore))); - Assert.That(RedisClient.GetLexicalScore(nextMinScore), - Is.LessThan(RedisClient.GetLexicalScore(maxScore))); + Assert.That(RedisClient.GetLexicalScore(nextMinScore), + Is.LessThan(RedisClient.GetLexicalScore(maxScore))); - Debug.WriteLine(String.Format("Lexical Score of '{0}' is: {1}", minScore, RedisClient.GetLexicalScore(minScore))); - Debug.WriteLine(String.Format("Lexical Score of '{0}' is: {1}", nextMinScore, RedisClient.GetLexicalScore(nextMinScore))); - Debug.WriteLine(String.Format("Lexical Score of '{0}' is: {1}", maxScore, RedisClient.GetLexicalScore(maxScore))); - } - } + Debug.WriteLine(String.Format("Lexical Score of '{0}' is: {1}", minScore, RedisClient.GetLexicalScore(minScore))); + Debug.WriteLine(String.Format("Lexical Score of '{0}' is: {1}", nextMinScore, RedisClient.GetLexicalScore(nextMinScore))); + Debug.WriteLine(String.Format("Lexical Score of '{0}' is: {1}", maxScore, RedisClient.GetLexicalScore(maxScore))); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RetryCommandTests.Async.cs b/tests/ServiceStack.Redis.Tests/RetryCommandTests.Async.cs new file mode 
100644
index 00000000..6e2ed462
--- /dev/null
+++ b/tests/ServiceStack.Redis.Tests/RetryCommandTests.Async.cs
@@ -0,0 +1,143 @@
+using NUnit.Framework;
+using System;
+using System.Linq;
+using System.Net.Sockets;
+using System.Threading.Tasks;
+
+namespace ServiceStack.Redis.Tests
+{
+    [TestFixture, Category("Async")]
+    public class RetryCommandTestsAsync
+    {
+        [Test, Ignore("3 vs 2 needs investigation; does same in non-async")]
+        public async Task Does_retry_failed_commands()
+        {
+            // warning: this test looks brittle; is often failing "Expected: 3 But was: 2" (on main branch);
+
+            // LogManager.LogFactory = new ConsoleLogFactory(debugEnabled: true);
+            // RedisConfig.EnableVerboseLogging = true;
+            RedisStats.Reset();
+
+            var redisCtrl = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly();
+            await redisCtrl.FlushAllAsync();
+            await redisCtrl.SetClientAsync("redisCtrl");
+
+            var redis = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly();
+            await redis.SetClientAsync("redisRetry");
+
+            var clientInfo = await redisCtrl.GetClientsInfoAsync();
+            var redisId = clientInfo.First(m => m["name"] == "redisRetry")["id"];
+            Assert.That(redisId.Length, Is.GreaterThan(0));
+
+            Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(1));
+
+            ((RedisClient)redis).OnBeforeFlush = () =>
+            {
+                ((IRedisClient)redisCtrl).KillClients(withId: redisId);
+            };
+
+            Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(2));
+            Assert.That(await redis.GetAsync<int>("retryCounter"), Is.EqualTo(3));
+
+            Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(1));
+            Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(1));
+            Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(0));
+        }
+
+        [Test]
+        public async Task Does_retry_failed_commands_with_SocketException()
+        {
+            RedisStats.Reset();
+
+            var redis = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly();
+            await redis.FlushAllAsync();
+
+            Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(1));
+
+            ((RedisClient)redis).OnBeforeFlush = () =>
+            {
+                ((RedisClient)redis).OnBeforeFlush = null;
+                throw new SocketException();
+            };
+
+            Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(2));
+            Assert.That(await redis.GetAsync<int>("retryCounter"), Is.EqualTo(3));
+
+            Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(1));
+            Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(1));
+            Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(0));
+        }
+
+        [Test]
+        public async Task Does_Timeout_with_repeated_SocketException()
+        {
+            RedisConfig.Reset();
+            RedisConfig.DefaultRetryTimeout = 100;
+
+            var redis = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly();
+            await redis.FlushAllAsync();
+
+            Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(1));
+
+            ((RedisClient)redis).OnBeforeFlush = () =>
+            {
+                throw new SocketException();
+            };
+
+            try
+            {
+                await redis.IncrementValueAsync("retryCounter");
+                Assert.Fail("Should throw");
+            }
+            catch (RedisException ex)
+            {
+                Assert.That(ex.Message, Does.StartWith("Exceeded timeout"));
+
+                ((RedisClient)redis).OnBeforeFlush = null;
+                Assert.That(await redis.GetAsync<int>("retryCounter"), Is.EqualTo(1));
+
+                Assert.That(RedisStats.TotalRetryCount, Is.GreaterThan(1));
+                Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(0));
+                Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(1));
+            }
+
+            RedisConfig.Reset();
+        }
+
+        [Test]
+        public async Task Does_not_retry_when_RetryTimeout_is_Zero()
+        {
+            RedisConfig.Reset();
+            RedisConfig.DefaultRetryTimeout = 0;
+ + var redis = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly(); + await redis.FlushAllAsync(); + + Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(1)); + + ((RedisClient)redis).OnBeforeFlush = () => + { + throw new SocketException(); + }; + + try + { + await redis.IncrementValueAsync("retryCounter"); + Assert.Fail("Should throw"); + } + catch (Exception ex) + { + Assert.That(ex.Message, Does.StartWith("Exceeded timeout")); + + ((RedisClient)redis).OnBeforeFlush = null; + Assert.That(await redis.GetAsync("retryCounter"), Is.EqualTo(1)); + + Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(0)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(0)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(1)); + } + + RedisConfig.Reset(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RetryCommandTests.cs b/tests/ServiceStack.Redis.Tests/RetryCommandTests.cs new file mode 100644 index 00000000..e81575d4 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RetryCommandTests.cs @@ -0,0 +1,145 @@ +using System; +using System.Linq; +using System.Net.Sockets; +using System.Threading; +using NUnit.Framework; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RetryCommandTests + { + [Test] + public void Does_retry_failed_commands() + { + // warning: this test looks brittle; is often failing "Expected: 3 But was: 2" (on main branch); + + // LogManager.LogFactory = new ConsoleLogFactory(debugEnabled: true); + // RedisConfig.EnableVerboseLogging = true; + RedisStats.Reset(); + + var redisCtrl = new RedisClient(RedisConfig.DefaultHost); + redisCtrl.FlushAll(); + redisCtrl.SetClient("redisCtrl"); + + var redis = new RedisClient(RedisConfig.DefaultHost); + redis.SetClient("redisRetry"); + + var clientInfo = redisCtrl.GetClientsInfo(); + var redisId = clientInfo.First(m => m["name"] == "redisRetry")["id"]; + Assert.That(redisId.Length, Is.GreaterThan(0)); + + Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(1)); + + redis.OnBeforeFlush = () => + { + redisCtrl.KillClients(withId: redisId); + }; + + Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(2)); + Assert.That(redis.Get("retryCounter"), Is.EqualTo(3)); + + Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(0)); + } + + [Test] + public void Does_retry_failed_commands_with_SocketException() + { + RedisStats.Reset(); + + var redis = new RedisClient(RedisConfig.DefaultHost); + redis.FlushAll(); + + Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(1)); + + redis.OnBeforeFlush = () => + { + redis.OnBeforeFlush = null; + throw new SocketException(); + }; + + Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(2)); + Assert.That(redis.Get("retryCounter"), Is.EqualTo(3)); + + Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(0)); + } + + [Test] + public void Does_Timeout_with_repeated_SocketException() + { + RedisConfig.Reset(); + RedisConfig.DefaultRetryTimeout = 100; + + var redis = new RedisClient(RedisConfig.DefaultHost); + redis.FlushAll(); + + Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(1)); + + redis.OnBeforeFlush = () => + { + throw new SocketException(); + }; + + try + { + 
redis.IncrementValue("retryCounter"); + Assert.Fail("Should throw"); + } + catch (RedisException ex) + { + Assert.That(ex.Message, Does.StartWith("Exceeded timeout")); + + redis.OnBeforeFlush = null; + Assert.That(redis.Get("retryCounter"), Is.EqualTo(1)); + + Assert.That(RedisStats.TotalRetryCount, Is.GreaterThan(1)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(0)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(1)); + } + + RedisConfig.Reset(); + } + + [Test] + public void Does_not_retry_when_RetryTimeout_is_Zero() + { + RedisConfig.Reset(); + RedisConfig.DefaultRetryTimeout = 0; + + var redis = new RedisClient(RedisConfig.DefaultHost); + redis.FlushAll(); + + Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(1)); + + redis.OnBeforeFlush = () => + { + throw new SocketException(); + }; + + try + { + redis.IncrementValue("retryCounter"); + Assert.Fail("Should throw"); + } + catch (Exception ex) + { + Assert.That(ex.Message, Does.StartWith("Exceeded timeout")); + + redis.OnBeforeFlush = null; + Assert.That(redis.Get("retryCounter"), Is.EqualTo(1)); + + Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(0)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(0)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(1)); + } + + RedisConfig.Reset(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/SerializationTests.cs b/tests/ServiceStack.Redis.Tests/SerializationTests.cs index 5c5849ae..a425a741 100644 --- a/tests/ServiceStack.Redis.Tests/SerializationTests.cs +++ b/tests/ServiceStack.Redis.Tests/SerializationTests.cs @@ -8,38 +8,38 @@ namespace ServiceStack.Redis.Tests { - [TestFixture] - public class SerializationTests - { - public class Tuple - { - public Tuple() - { - } + [TestFixture] + public class SerializationTests + { + public class Tuple + { + public Tuple() + { + } - public Tuple(Type type, object value) - { - Type = type; - Value = value; - } + public Tuple(Type type, object value) + { + Type = type; + Value = value; + } - public Type Type { get; set; } - public object Value { get; set; } - } + public Type Type { get; set; } + public object Value { get; set; } + } - [Test] - public void Can_Serialize_type_with_object() - { - var obj = new CustomType { CustomId = 1, CustomName = "Name" }; - var typeWithObject = new Tuple(obj.GetType(), obj); - byte[] bytes = Encoding.UTF8.GetBytes(JsonSerializer.SerializeToString(typeWithObject)); + [Test] + public void Can_Serialize_type_with_object() + { + var obj = new CustomType { CustomId = 1, CustomName = "Name" }; + var typeWithObject = new Tuple(obj.GetType(), obj); + byte[] bytes = Encoding.UTF8.GetBytes(JsonSerializer.SerializeToString(typeWithObject)); - var bytesStr = Encoding.UTF8.GetString(bytes); - var fromTypeWithObject = JsonSerializer.DeserializeFromString(bytesStr); - var newObj = fromTypeWithObject.Value as CustomType; + var bytesStr = Encoding.UTF8.GetString(bytes); + var fromTypeWithObject = JsonSerializer.DeserializeFromString(bytesStr); + var newObj = fromTypeWithObject.Value as CustomType; - Assert.That(newObj.CustomId, Is.EqualTo(obj.CustomId)); - Assert.That(newObj.CustomName, Is.EqualTo(obj.CustomName)); - } - } + Assert.That(newObj.CustomId, Is.EqualTo(obj.CustomId)); + Assert.That(newObj.CustomName, Is.EqualTo(obj.CustomName)); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis.Tests.csproj b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis.Tests.csproj index 9e8fafe4..374a13f9 100644 --- 
a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis.Tests.csproj +++ b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis.Tests.csproj @@ -1,288 +1,54 @@ - - + - Debug - AnyCPU - 9.0.30729 - 2.0 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A} - Library - Properties - ServiceStack.Redis.Tests + + net472;net6.0 + portable ServiceStack.Redis.Tests - v3.5 - 512 - - - 3.5 - - publish\ - true - Disk - false - Foreground - 7 - Days - false - false - true - 0 - 1.0.0.%2a - false - false - true - - - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - AllRules.ruleset - - - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - AllRules.ruleset - x86 - - - true - bin\STATIC_ONLY NO_EXPRESSIONS\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset - - - true - bin\MonoTouch\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset - - - true - bin\x86\Debug\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - true - true - - - bin\x86\Release\ - TRACE - true - pdbonly - x86 - prompt - AllRules.ruleset - true - true - - - true - bin\x86\STATIC_ONLY NO_EXPRESSIONS\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - true - false - - - true - bin\x86\MonoTouch\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - false - false + Library + ServiceStack.Redis.Tests + false + false + false + false + false + false + false + false - - False - ..\..\lib\tests\Moq.dll - - - False - ..\..\lib\tests\Northwind.Common.dll - - - False - ..\..\lib\tests\nunit.framework.dll - - - ..\..\lib\tests\ServiceStack.dll - - - ..\..\lib\ServiceStack.Common.dll - - - ..\..\lib\tests\ServiceStack.Common.Tests.dll - - - ..\..\lib\ServiceStack.Interfaces.dll - - - ..\..\lib\tests\ServiceStack.Messaging.Tests.dll - - - False - ..\..\lib\tests\ServiceStack.ServiceInterface.dll - - - ..\..\lib\ServiceStack.Text.dll - - - - 3.5 - - - 3.5 - - - 3.5 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Code - - - - - - - - - - - - - - - - - - - Code - - - - - - - - - - - - - - - - - - - - - - - + + + + + + - - + + $(DefineConstants);NET45 + + + + - - - False - .NET Framework 3.5 SP1 Client Profile - false - - - False - .NET Framework 3.5 SP1 - true - - - False - Windows Installer 3.1 - true - + + $(DefineConstants);NETCORE + + + + + + + + + + - - - {AF99F19B-4C04-4F58-81EF-B092F1FCC540} - ServiceStack.Redis - + - - \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/.gitignore b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/.gitignore deleted file mode 100644 index cbbd0b5c..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -bin/ -obj/ \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs deleted file mode 100644 index 95f147e5..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs +++ /dev/null @@ -1,191 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
-// - -using System; -using System.Collections.Generic; -using ServiceStack.CacheAccess; - -namespace ServiceStack.Redis -{ - /// - /// BasicRedisClientManager for ICacheClient - /// - /// For more interoperabilty I'm also implementing the ICacheClient on - /// this cache client manager which has the affect of calling - /// GetCacheClient() for all write operations and GetReadOnlyCacheClient() - /// for the read ones. - /// - /// This works well for master-slave replication scenarios where you have - /// 1 master that replicates to multiple read slaves. - /// - public partial class BasicRedisClientManager - : ICacheClient - { - public const int DefaultCacheDb = 9; - - public ICacheClient GetCacheClient() - { - return ConfigureRedisClient(this.GetClient()); - } - - public ICacheClient GetReadOnlyCacheClient() - { - return ConfigureRedisClient(this.GetReadOnlyClient()); - } - - private ICacheClient ConfigureRedisClient(IRedisClient client) - { - //Provide automatic partitioning of 'Redis Caches' from normal persisted data - //which is on DB '0' by default. - var notUserSpecified = this.Db == RedisNativeClient.DefaultDb; - if (notUserSpecified) - { - client.Db = DefaultCacheDb; - } - return client; - } - - #region Implementation of ICacheClient - - public bool Remove(string key) - { - using (var client = GetReadOnlyCacheClient()) - { - return client.Remove(key); - } - } - - public void RemoveAll(IEnumerable keys) - { - using (var client = GetCacheClient()) - { - client.RemoveAll(keys); - } - } - - public T Get(string key) - { - using (var client = GetReadOnlyCacheClient()) - { - return client.Get(key); - } - } - - public long Increment(string key, uint amount) - { - using (var client = GetCacheClient()) - { - return client.Increment(key, amount); - } - } - - public long Decrement(string key, uint amount) - { - using (var client = GetCacheClient()) - { - return client.Decrement(key, amount); - } - } - - public bool Add(string key, T value) - { - using (var client = GetCacheClient()) - { - return client.Add(key, value); - } - } - - public bool Set(string key, T value) - { - using (var client = GetCacheClient()) - { - return client.Set(key, value); - } - } - - public bool Replace(string key, T value) - { - using (var client = GetCacheClient()) - { - return client.Replace(key, value); - } - } - - public bool Add(string key, T value, DateTime expiresAt) - { - using (var client = GetCacheClient()) - { - return client.Add(key, value, expiresAt); - } - } - - public bool Set(string key, T value, DateTime expiresAt) - { - using (var client = GetCacheClient()) - { - return client.Set(key, value, expiresAt); - } - } - - public bool Replace(string key, T value, DateTime expiresAt) - { - using (var client = GetCacheClient()) - { - return client.Replace(key, value, expiresAt); - } - } - - public bool Add(string key, T value, TimeSpan expiresIn) - { - using (var client = GetCacheClient()) - { - return client.Add(key, value, expiresIn); - } - } - - public bool Set(string key, T value, TimeSpan expiresIn) - { - using (var client = GetCacheClient()) - { - return client.Set(key, value, expiresIn); - } - } - - public bool Replace(string key, T value, TimeSpan expiresIn) - { - using (var client = GetCacheClient()) - { - return client.Replace(key, value, expiresIn); - } - } - - public void FlushAll() - { - using (var client = GetCacheClient()) - { - client.FlushAll(); - } - } - - public IDictionary GetAll(IEnumerable keys) - { - using (var client = GetReadOnlyCacheClient()) - { - return client.GetAll(keys); - } - } 
- - #endregion - } - - -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/BasicRedisClientManager.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/BasicRedisClientManager.cs deleted file mode 100644 index 08bee3b0..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/BasicRedisClientManager.cs +++ /dev/null @@ -1,133 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using ServiceStack.Common.Web; - -namespace ServiceStack.Redis -{ - /// - /// Provides thread-safe retrievel of redis clients since each client is a new one. - /// Allows the configuration of different ReadWrite and ReadOnly hosts - /// - public partial class BasicRedisClientManager - : IRedisClientsManager - { - private List ReadWriteHosts { get; set; } - private List ReadOnlyHosts { get; set; } - - private int readWriteHostsIndex; - private int readOnlyHostsIndex; - - public IRedisClientFactory RedisClientFactory { get; set; } - - public int Db { get; private set; } - - public BasicRedisClientManager() : this(RedisNativeClient.DefaultHost) { } - - public BasicRedisClientManager(params string[] readWriteHosts) - : this(readWriteHosts, readWriteHosts) - { - } - - /// - /// Hosts can be an IP Address or Hostname in the format: host[:port] - /// e.g. 127.0.0.1:6379 - /// default is: localhost:6379 - /// - /// The write hosts. - /// The read hosts. - public BasicRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts) - : this(readWriteHosts, readOnlyHosts, RedisNativeClient.DefaultDb) - { - } - - public BasicRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts, - int initalDb) - { - this.Db = initalDb; - - ReadWriteHosts = readWriteHosts.ToIpEndPoints(); - ReadOnlyHosts = readOnlyHosts.ToIpEndPoints(); - - this.RedisClientFactory = Redis.RedisClientFactory.Instance; - - this.OnStart(); - } - - protected virtual void OnStart() - { - this.Start(); - } - - /// - /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - /// - /// - public IRedisClient GetClient() - { - var nextHost = ReadWriteHosts[readWriteHostsIndex++ % ReadWriteHosts.Count]; - var client = RedisClientFactory.CreateRedisClient( - nextHost.Host, nextHost.Port); - - //Set database to userSpecified if different - if (Db != RedisNativeClient.DefaultDb) - { - client.Db = Db; - } - - return client; - } - - /// - /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. 
- /// - /// - public virtual IRedisClient GetReadOnlyClient() - { - var nextHost = ReadOnlyHosts[readOnlyHostsIndex++ % ReadOnlyHosts.Count]; - var client = RedisClientFactory.CreateRedisClient( - nextHost.Host, nextHost.Port); - - //Set database to userSpecified if different - if (Db != RedisNativeClient.DefaultDb) - { - client.Db = Db; - } - - return client; - } - - public void SetAll(IDictionary values) - { - foreach (var entry in values) - { - Set(entry.Key, entry.Value); - } - } - - public void Start() - { - readWriteHostsIndex = 0; - readOnlyHostsIndex = 0; - } - - public void Dispose() - { - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Commands.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Commands.cs deleted file mode 100644 index 7c586524..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Commands.cs +++ /dev/null @@ -1,138 +0,0 @@ -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public static class Commands - { - public readonly static byte[] Quit = "QUIT".ToUtf8Bytes(); - public readonly static byte[] Auth = "AUTH".ToUtf8Bytes(); - public readonly static byte[] Exists = "EXISTS".ToUtf8Bytes(); - public readonly static byte[] Del = "DEL".ToUtf8Bytes(); - public readonly static byte[] Type = "TYPE".ToUtf8Bytes(); - public readonly static byte[] Keys = "KEYS".ToUtf8Bytes(); - public readonly static byte[] RandomKey = "RANDOMKEY".ToUtf8Bytes(); - public readonly static byte[] Rename = "RENAME".ToUtf8Bytes(); - public readonly static byte[] RenameNx = "RENAMENX".ToUtf8Bytes(); - public readonly static byte[] DbSize = "DBSIZE".ToUtf8Bytes(); - public readonly static byte[] Expire = "EXPIRE".ToUtf8Bytes(); - public readonly static byte[] ExpireAt = "EXPIREAT".ToUtf8Bytes(); - public readonly static byte[] Ttl = "TTL".ToUtf8Bytes(); - public readonly static byte[] Select = "SELECT".ToUtf8Bytes(); - public readonly static byte[] Move = "MOVE".ToUtf8Bytes(); - public readonly static byte[] FlushDb = "FLUSHDB".ToUtf8Bytes(); - public readonly static byte[] FlushAll = "FLUSHALL".ToUtf8Bytes(); - public readonly static byte[] Ping = "PING".ToUtf8Bytes(); - public readonly static byte[] Echo = "ECHO".ToUtf8Bytes(); - - public readonly static byte[] Save = "SAVE".ToUtf8Bytes(); - public readonly static byte[] BgSave = "BGSAVE".ToUtf8Bytes(); - public readonly static byte[] LastSave = "LASTSAVE".ToUtf8Bytes(); - public readonly static byte[] Shutdown = "SHUTDOWN".ToUtf8Bytes(); - public readonly static byte[] BgRewriteAof = "BGREWRITEAOF".ToUtf8Bytes(); - - public readonly static byte[] Info = "INFO".ToUtf8Bytes(); - public readonly static byte[] SlaveOf = "SLAVEOF".ToUtf8Bytes(); - public readonly static byte[] Monitor = "MONITOR".ToUtf8Bytes(); //missing - public readonly static byte[] Debug = "DEBUG".ToUtf8Bytes(); //missing - public readonly static byte[] Config = "CONFIG".ToUtf8Bytes(); //missing - - - public readonly static byte[] Set = "SET".ToUtf8Bytes(); - public readonly static byte[] Get = "GET".ToUtf8Bytes(); - public readonly static byte[] GetSet = "GETSET".ToUtf8Bytes(); - public readonly static byte[] MGet = "MGET".ToUtf8Bytes(); - public readonly static byte[] SetNx = "SETNX".ToUtf8Bytes(); - public readonly static byte[] SetEx = "SETEX".ToUtf8Bytes(); - public readonly static byte[] MSet = "MSET".ToUtf8Bytes(); - public readonly static byte[] MSetNx = "MSETNX".ToUtf8Bytes(); - public readonly static byte[] Incr = "INCR".ToUtf8Bytes(); - public readonly static byte[] IncrBy = 
"INCRBY".ToUtf8Bytes(); - public readonly static byte[] Decr = "DECR".ToUtf8Bytes(); - public readonly static byte[] DecrBy = "DECRBY".ToUtf8Bytes(); - public readonly static byte[] Append = "APPEND".ToUtf8Bytes(); - public readonly static byte[] Substr = "SUBSTR".ToUtf8Bytes(); - - public readonly static byte[] RPush = "RPUSH".ToUtf8Bytes(); - public readonly static byte[] LPush = "LPUSH".ToUtf8Bytes(); - public readonly static byte[] LLen = "LLEN".ToUtf8Bytes(); - public readonly static byte[] LRange = "LRANGE".ToUtf8Bytes(); - public readonly static byte[] LTrim = "LTRIM".ToUtf8Bytes(); - public readonly static byte[] LIndex = "LINDEX".ToUtf8Bytes(); - public readonly static byte[] LSet = "LSET".ToUtf8Bytes(); - public readonly static byte[] LRem = "LREM".ToUtf8Bytes(); - public readonly static byte[] LPop = "LPOP".ToUtf8Bytes(); - public readonly static byte[] RPop = "RPOP".ToUtf8Bytes(); - public readonly static byte[] BLPop = "BLPOP".ToUtf8Bytes(); - public readonly static byte[] BRPop = "BRPOP".ToUtf8Bytes(); - public readonly static byte[] RPopLPush = "RPOPLPUSH".ToUtf8Bytes(); - - public readonly static byte[] SAdd = "SADD".ToUtf8Bytes(); - public readonly static byte[] SRem = "SREM".ToUtf8Bytes(); - public readonly static byte[] SPop = "SPOP".ToUtf8Bytes(); - public readonly static byte[] SMove = "SMOVE".ToUtf8Bytes(); - public readonly static byte[] SCard = "SCARD".ToUtf8Bytes(); - public readonly static byte[] SIsMember = "SISMEMBER".ToUtf8Bytes(); - public readonly static byte[] SInter = "SINTER".ToUtf8Bytes(); - public readonly static byte[] SInterStore = "SINTERSTORE".ToUtf8Bytes(); - public readonly static byte[] SUnion = "SUNION".ToUtf8Bytes(); - public readonly static byte[] SUnionStore = "SUNIONSTORE".ToUtf8Bytes(); - public readonly static byte[] SDiff = "SDIFF".ToUtf8Bytes(); - public readonly static byte[] SDiffStore = "SDIFFSTORE".ToUtf8Bytes(); - public readonly static byte[] SMembers = "SMEMBERS".ToUtf8Bytes(); - public readonly static byte[] SRandMember = "SRANDMEMBER".ToUtf8Bytes(); - - public readonly static byte[] ZAdd = "ZADD".ToUtf8Bytes(); - public readonly static byte[] ZRem = "ZREM".ToUtf8Bytes(); - public readonly static byte[] ZIncrBy = "ZINCRBY".ToUtf8Bytes(); - public readonly static byte[] ZRank = "ZRANK".ToUtf8Bytes(); - public readonly static byte[] ZRevRank = "ZREVRANK".ToUtf8Bytes(); - public readonly static byte[] ZRange = "ZRANGE".ToUtf8Bytes(); - public readonly static byte[] ZRevRange = "ZREVRANGE".ToUtf8Bytes(); - public readonly static byte[] ZRangeByScore = "ZRANGEBYSCORE".ToUtf8Bytes(); - public readonly static byte[] ZRevRangeByScore = "ZREVRANGEBYSCORE".ToUtf8Bytes(); - public readonly static byte[] ZCard = "ZCARD".ToUtf8Bytes(); - public readonly static byte[] ZScore = "ZSCORE".ToUtf8Bytes(); - public readonly static byte[] ZRemRangeByRank = "ZREMRANGEBYRANK".ToUtf8Bytes(); - public readonly static byte[] ZRemRangeByScore = "ZREMRANGEBYSCORE".ToUtf8Bytes(); - public readonly static byte[] ZUnionStore = "ZUNIONSTORE".ToUtf8Bytes(); - public readonly static byte[] ZInterStore = "ZINTERSTORE".ToUtf8Bytes(); - - public readonly static byte[] HSet = "HSET".ToUtf8Bytes(); - public readonly static byte[] HSetNx = "HSETNX".ToUtf8Bytes(); - public readonly static byte[] HGet = "HGET".ToUtf8Bytes(); - public readonly static byte[] HMSet = "HMSET".ToUtf8Bytes(); - public readonly static byte[] HMGet = "HMGET".ToUtf8Bytes(); - public readonly static byte[] HIncrBy = "HINCRBY".ToUtf8Bytes(); - public readonly static byte[] HExists = 
"HEXISTS".ToUtf8Bytes(); - public readonly static byte[] HDel = "HDEL".ToUtf8Bytes(); - public readonly static byte[] HLen = "HLEN".ToUtf8Bytes(); - public readonly static byte[] HKeys = "HKEYS".ToUtf8Bytes(); - public readonly static byte[] HVals = "HVALS".ToUtf8Bytes(); - public readonly static byte[] HGetAll = "HGETALL".ToUtf8Bytes(); - - public readonly static byte[] Sort = "SORT".ToUtf8Bytes(); - - public readonly static byte[] Multi = "MULTI".ToUtf8Bytes(); - public readonly static byte[] Exec = "EXEC".ToUtf8Bytes(); - public readonly static byte[] Discard = "DISCARD".ToUtf8Bytes(); - - public readonly static byte[] Subscribe = "SUBSCRIBE".ToUtf8Bytes(); - public readonly static byte[] UnSubscribe = "UNSUBSCRIBE".ToUtf8Bytes(); - public readonly static byte[] PSubscribe = "PSUBSCRIBE".ToUtf8Bytes(); - public readonly static byte[] PUnSubscribe = "PUNSUBSCRIBE".ToUtf8Bytes(); - public readonly static byte[] Publish = "PUBLISH".ToUtf8Bytes(); - - - public readonly static byte[] WithScores = "WITHSCORES".ToUtf8Bytes(); - public readonly static byte[] Limit = "LIMIT".ToUtf8Bytes(); - public readonly static byte[] By = "BY".ToUtf8Bytes(); - public readonly static byte[] Asc = "ASC".ToUtf8Bytes(); - public readonly static byte[] Desc = "DESC".ToUtf8Bytes(); - public readonly static byte[] Alpha = "ALPHA".ToUtf8Bytes(); - public readonly static byte[] Store = "STORE".ToUtf8Bytes(); - - //SLAVEOF NO ONE - public readonly static byte[] No = "NO".ToUtf8Bytes(); - public readonly static byte[] One = "ONE".ToUtf8Bytes(); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisHash.Generic.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisHash.Generic.cs deleted file mode 100644 index c6d76c31..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisHash.Generic.cs +++ /dev/null @@ -1,23 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System.Collections.Generic; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis.Generic -{ - public interface IRedisHash : IDictionary, IHasStringId - { - Dictionary GetAll(); - } - -} diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisList.Generic.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisList.Generic.cs deleted file mode 100644 index fbb59f90..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisList.Generic.cs +++ /dev/null @@ -1,49 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis.Generic -{ - /// - /// Wrap the common redis list operations under a IList[string] interface. 
- /// - - public interface IRedisList - : IList, IHasStringId - { - List GetAll(); - List GetRange(int startingFrom, int endingAt); - List GetRangeFromSortedList(int startingFrom, int endingAt); - void RemoveAll(); - void Trim(int keepStartingFrom, int keepEndingAt); - int RemoveValue(T value); - int RemoveValue(T value, int noOfMatches); - - void Append(T value); - void Prepend(T value); - T RemoveStart(); - T BlockingRemoveStart(TimeSpan? timeOut); - T RemoveEnd(); - - void Enqueue(T value); - T Dequeue(); - T BlockingDequeue(TimeSpan? timeOut); - - void Push(T value); - T Pop(); - T BlockingPop(TimeSpan? timeOut); - T PopAndPush(IRedisList toList); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisSet.Generic.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisSet.Generic.cs deleted file mode 100644 index 231e2f06..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisSet.Generic.cs +++ /dev/null @@ -1,31 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System.Collections.Generic; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis.Generic -{ - public interface IRedisSet : ICollection, IHasStringId - { - List Sort(int startingFrom, int endingAt); - HashSet GetAll(); - T PopRandomItem(); - T GetRandomItem(); - void MoveTo(T item, IRedisSet toSet); - void PopulateWithIntersectOf(params IRedisSet[] sets); - void PopulateWithUnionOf(params IRedisSet[] sets); - void GetDifferences(params IRedisSet[] withSets); - void PopulateWithDifferencesOf(IRedisSet fromSet, params IRedisSet[] withSets); - } - -} diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisSortedSet.Generic.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisSortedSet.Generic.cs deleted file mode 100644 index 7d0c4fea..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisSortedSet.Generic.cs +++ /dev/null @@ -1,38 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System.Collections.Generic; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis.Generic -{ - public interface IRedisSortedSet : ICollection, IHasStringId - { - T PopItemWithHighestScore(); - T PopItemWithLowestScore(); - double IncrementItem(T item, double incrementBy); - int IndexOf(T item); - int IndexOfDescending(T item); - List GetAll(); - List GetAllDescending(); - List GetRange(int fromRank, int toRank); - List GetRangeByLowestScore(double fromScore, double toScore); - List GetRangeByLowestScore(double fromScore, double toScore, int? skip, int? take); - List GetRangeByHighestScore(double fromScore, double toScore); - List GetRangeByHighestScore(double fromScore, double toScore, int? skip, int? 
take); - int RemoveRange(int minRank, int maxRank); - int RemoveRangeByScore(double fromScore, double toScore); - double GetItemScore(T item); - int PopulateWithIntersectOf(params IRedisSortedSet[] setIds); - int PopulateWithUnionOf(params IRedisSortedSet[] setIds); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisTransaction.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisTransaction.cs deleted file mode 100644 index b838ee59..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisTransaction.cs +++ /dev/null @@ -1,60 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; - -namespace ServiceStack.Redis.Generic -{ - public interface IRedisTypedTransaction - : IDisposable - { - void QueueCommand(Action> command); - void QueueCommand(Action> command, Action onSuccessCallback); - void QueueCommand(Action> command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func, int> command); - void QueueCommand(Func, int> command, Action onSuccessCallback); - void QueueCommand(Func, int> command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func, bool> command); - void QueueCommand(Func, bool> command, Action onSuccessCallback); - void QueueCommand(Func, bool> command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func, double> command); - void QueueCommand(Func, double> command, Action onSuccessCallback); - void QueueCommand(Func, double> command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func, byte[]> command); - void QueueCommand(Func, byte[]> command, Action onSuccessCallback); - void QueueCommand(Func, byte[]> command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func, string> command); - void QueueCommand(Func, string> command, Action onSuccessCallback); - void QueueCommand(Func, string> command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func, T> command); - void QueueCommand(Func, T> command, Action onSuccessCallback); - void QueueCommand(Func, T> command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func, List> command); - void QueueCommand(Func, List> command, Action> onSuccessCallback); - void QueueCommand(Func, List> command, Action> onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func, List> command); - void QueueCommand(Func, List> command, Action> onSuccessCallback); - void QueueCommand(Func, List> command, Action> onSuccessCallback, Action onErrorCallback); - - void Commit(); - void Rollback(); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisTypedClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisTypedClient.cs deleted file mode 100644 index d69b2f75..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/IRedisTypedClient.cs +++ /dev/null @@ -1,161 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot 
(demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using ServiceStack.DataAccess; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis.Generic -{ - public interface IRedisTypedClient - : IBasicPersistenceProvider - { - IHasNamed> Lists { get; set; } - IHasNamed> Sets { get; set; } - IHasNamed> SortedSets { get; set; } - IRedisHash GetHash(string hashId); - - IRedisTypedTransaction CreateTransaction(); - - IDisposable AcquireLock(); - IDisposable AcquireLock(TimeSpan timeOut); - - int Db { get; set; } - List GetAllKeys(); - - T this[string key] { get; set; } - - string SequenceKey { get; set; } - void SetSequence(int value); - int GetNextSequence(); - RedisKeyType GetEntryType(string key); - string GetRandomKey(); - - void SetEntry(string key, T value); - void SetEntry(string key, T value, TimeSpan expireIn); - bool SetEntryIfNotExists(string key, T value); - T GetValue(string key); - T GetAndSetValue(string key, T value); - bool ContainsKey(string key); - bool RemoveEntry(string key); - bool RemoveEntry(params string[] args); - bool RemoveEntry(params IHasStringId[] entities); - int IncrementValue(string key); - int IncrementValueBy(string key, int count); - int DecrementValue(string key); - int DecrementValueBy(string key, int count); - bool ExpireEntryIn(string key, TimeSpan expiresAt); - bool ExpireEntryAt(string key, DateTime dateTime); - TimeSpan GetTimeToLive(string key); - void Save(); - void SaveAsync(); - void FlushDb(); - void FlushAll(); - T[] SearchKeys(string pattern); - List GetValues(List keys); - List GetSortedEntryValues(IRedisSet fromSet, int startingFrom, int endingAt); - - HashSet GetAllItemsFromSet(IRedisSet fromSet); - void AddItemToSet(IRedisSet toSet, T item); - void RemoveItemFromSet(IRedisSet fromSet, T item); - T PopItemFromSet(IRedisSet fromSet); - void MoveBetweenSets(IRedisSet fromSet, IRedisSet toSet, T item); - int GetSetCount(IRedisSet set); - bool SetContainsItem(IRedisSet set, T item); - HashSet GetIntersectFromSets(params IRedisSet[] sets); - void StoreIntersectFromSets(IRedisSet intoSet, params IRedisSet[] sets); - HashSet GetUnionFromSets(params IRedisSet[] sets); - void StoreUnionFromSets(IRedisSet intoSet, params IRedisSet[] sets); - HashSet GetDifferencesFromSet(IRedisSet fromSet, params IRedisSet[] withSets); - void StoreDifferencesFromSet(IRedisSet intoSet, IRedisSet fromSet, params IRedisSet[] withSets); - T GetRandomItemFromSet(IRedisSet fromSet); - - List GetAllItemsFromList(IRedisList fromList); - List GetRangeFromList(IRedisList fromList, int startingFrom, int endingAt); - List SortList(IRedisList fromList, int startingFrom, int endingAt); - void AddItemToList(IRedisList fromList, T value); - void PrependItemToList(IRedisList fromList, T value); - T RemoveStartFromList(IRedisList fromList); - T BlockingRemoveStartFromList(IRedisList fromList, TimeSpan? 
timeOut); - T RemoveEndFromList(IRedisList fromList); - void RemoveAllFromList(IRedisList fromList); - void TrimList(IRedisList fromList, int keepStartingFrom, int keepEndingAt); - int RemoveItemFromList(IRedisList fromList, T value); - int RemoveItemFromList(IRedisList fromList, T value, int noOfMatches); - int GetListCount(IRedisList fromList); - T GetItemFromList(IRedisList fromList, int listIndex); - void SetItemInList(IRedisList toList, int listIndex, T value); - - //Queue operations - void EnqueueItemOnList(IRedisList fromList, T item); - T DequeueItemFromList(IRedisList fromList); - T BlockingDequeueItemFromList(IRedisList fromList, TimeSpan? timeOut); - - //Stack operations - void PushItemToList(IRedisList fromList, T item); - T PopItemFromList(IRedisList fromList); - T BlockingPopItemFromList(IRedisList fromList, TimeSpan? timeOut); - T PopAndPushItemBetweenLists(IRedisList fromList, IRedisList toList); - - void AddItemToSortedSet(IRedisSortedSet toSet, T value); - void AddItemToSortedSet(IRedisSortedSet toSet, T value, double score); - bool RemoveItemFromSortedSet(IRedisSortedSet fromSet, T value); - T PopItemWithLowestScoreFromSortedSet(IRedisSortedSet fromSet); - T PopItemWithHighestScoreFromSortedSet(IRedisSortedSet fromSet); - bool SortedSetContainsItem(IRedisSortedSet set, T value); - double IncrementItemInSortedSet(IRedisSortedSet set, T value, double incrementBy); - int GetItemIndexInSortedSet(IRedisSortedSet set, T value); - int GetItemIndexInSortedSetDesc(IRedisSortedSet set, T value); - List GetAllItemsFromSortedSet(IRedisSortedSet set); - List GetAllItemsFromSortedSetDesc(IRedisSortedSet set); - List GetRangeFromSortedSet(IRedisSortedSet set, int fromRank, int toRank); - List GetRangeFromSortedSetDesc(IRedisSortedSet set, int fromRank, int toRank); - IDictionary GetAllWithScoresFromSortedSet(IRedisSortedSet set); - IDictionary GetRangeWithScoresFromSortedSet(IRedisSortedSet set, int fromRank, int toRank); - IDictionary GetRangeWithScoresFromSortedSetDesc(IRedisSortedSet set, int fromRank, int toRank); - List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore); - List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take); - List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore); - List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take); - IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore); - IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take); - IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore); - IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take); - List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore); - List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take); - List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore); - List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? 
take); - IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore); - IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take); - IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore); - IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take); - int RemoveRangeFromSortedSet(IRedisSortedSet set, int minRank, int maxRank); - int RemoveRangeFromSortedSetByScore(IRedisSortedSet set, double fromScore, double toScore); - int GetSortedSetCount(IRedisSortedSet set); - double GetItemScoreInSortedSet(IRedisSortedSet set, T value); - int StoreIntersectFromSortedSets(IRedisSortedSet intoSetId, params IRedisSortedSet[] setIds); - int StoreUnionFromSortedSets(IRedisSortedSet intoSetId, params IRedisSortedSet[] setIds); - - bool HashContainsEntry(IRedisHash hash, TKey key); - bool SetEntryInHash(IRedisHash hash, TKey key, T value); - bool SetEntryInHashIfNotExists(IRedisHash hash, TKey key, T value); - void SetRangeInHash(IRedisHash hash, IEnumerable> keyValuePairs); - T GetValueFromHash(IRedisHash hash, TKey key); - bool RemoveEntryFromHash(IRedisHash hash, TKey key); - int GetHashCount(IRedisHash hash); - List GetHashKeys(IRedisHash hash); - List GetHashValues(IRedisHash hash); - Dictionary GetAllEntriesFromHash(IRedisHash hash); - } - -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs deleted file mode 100644 index a8c1d560..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs +++ /dev/null @@ -1,149 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections; -using System.Collections.Generic; - -namespace ServiceStack.Redis.Generic -{ - /// - /// Wrap the common redis set operations under a ICollection[string] interface. 
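The IRedisTypedClient members listed above map Redis commands onto JSON-serialized POCOs; the extracted diff text appears to have dropped the generic angle brackets, so signatures such as IRedisTypedClient<T>, IRedisList<T> and IRedisHash<TKey, T> are assumed below. A minimal usage sketch, illustrative only and not part of this diff, assuming a Redis server on localhost:6379 and a hypothetical Customer POCO:

```csharp
// Illustrative only, not part of the patch. Assumes the generic signatures
// implied above and a local Redis instance.
using System;
using ServiceStack.Redis;
using ServiceStack.Redis.Generic;

public class Customer
{
    public string Id { get; set; }
    public string Name { get; set; }
}

public static class TypedClientExample
{
    public static void Main()
    {
        using (var redis = new RedisClient("localhost"))
        {
            IRedisTypedClient<Customer> typed = redis.GetTypedClient<Customer>();

            // Stores the POCO as JSON under the key, with an expiry
            typed.SetEntry("urn:customer:1",
                new Customer { Id = "1", Name = "Ann" },
                TimeSpan.FromHours(1));

            Customer saved = typed.GetValue("urn:customer:1"); // GET + JSON deserialize
            Console.WriteLine(saved.Name);
        }
    }
}
```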
- /// - internal class RedisClientHash - : IRedisHash - { - private readonly RedisTypedClient client; - private readonly string hashId; - - public RedisClientHash(RedisTypedClient client, string hashId) - { - this.client = client; - this.hashId = hashId; - } - - public string Id - { - get { return this.hashId; } - } - - public IEnumerator> GetEnumerator() - { - return client.GetAllEntriesFromHash(this).GetEnumerator(); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public Dictionary GetAll() - { - return client.GetAllEntriesFromHash(this); - } - - public void Add(KeyValuePair item) - { - client.SetEntryInHash(this, item.Key, item.Value); - } - - public void Clear() - { - client.RemoveEntry(this); - } - - public bool Contains(KeyValuePair item) - { - var value = client.GetValueFromHash(this, item.Key); - return !Equals(value, default(T)) && Equals(value, item.Value); - } - - public void CopyTo(KeyValuePair[] array, int arrayIndex) - { - var allItemsInHash = client.GetAllEntriesFromHash(this); - - var i = arrayIndex; - foreach (var entry in allItemsInHash) - { - if (i >= array.Length) return; - array[i] = entry; - } - } - - public bool Remove(KeyValuePair item) - { - return Contains(item) && client.RemoveEntryFromHash(this, item.Key); - } - - public int Count - { - get { return client.GetHashCount(this); } - } - - public bool IsReadOnly - { - get { return false; } - } - - public bool ContainsKey(TKey key) - { - return client.HashContainsEntry(this, key); - } - - public void Add(TKey key, T value) - { - client.SetEntryInHash(this, key, value); - } - - public bool Remove(TKey key) - { - return client.RemoveEntryFromHash(this, key); - } - - public bool TryGetValue(TKey key, out T value) - { - if (ContainsKey(key)) - { - value = client.GetValueFromHash(this, key); - return true; - } - value = default(T); - return false; - } - - public T this[TKey key] - { - get { return client.GetValueFromHash(this, key); } - set { client.SetEntryInHash(this, key, value); } - } - - public ICollection Keys - { - get { return client.GetHashKeys(this); } - } - - public ICollection Values - { - get { return client.GetHashValues(this); } - } - - public List GetAllKeys() - { - return client.GetHashKeys(this); - } - - public List GetAllValues() - { - return client.GetHashValues(this); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientList.Generic.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientList.Generic.cs deleted file mode 100644 index 5cf7a60e..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientList.Generic.cs +++ /dev/null @@ -1,236 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
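RedisClientHash<TKey, T> above adapts a Redis hash to IDictionary<TKey, T>-style members, with both keys and values stored as serialized strings. One apparent slip worth flagging: its CopyTo never advances `i` inside the loop, so every entry would land in the same array slot. Continuing the illustrative sketch from the interface example, with `typed` the IRedisTypedClient<Customer> from above:

```csharp
// Illustrative continuation, not part of the patch.
IRedisHash<string, Customer> byEmail = typed.GetHash<string>("urn:customers:byEmail");

byEmail["ann@example.org"] = new Customer { Id = "1", Name = "Ann" }; // HSET (JSON value)
bool known = byEmail.ContainsKey("ann@example.org");                  // HEXISTS
int count  = byEmail.Count;                                           // HLEN
byEmail.Remove("ann@example.org");                                    // HDEL
```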
-// - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Text; - -namespace ServiceStack.Redis.Generic -{ - internal class RedisClientList - : IRedisList - { - private readonly RedisTypedClient client; - private readonly string listId; - private const int PageLimit = 1000; - - public RedisClientList(RedisTypedClient client, string listId) - { - this.listId = listId; - this.client = client; - } - - public string Id - { - get { return listId; } - } - - public IEnumerator GetEnumerator() - { - return this.Count <= PageLimit - ? client.GetAllItemsFromList(this).GetEnumerator() - : GetPagingEnumerator(); - } - - public IEnumerator GetPagingEnumerator() - { - var skip = 0; - List pageResults; - do - { - pageResults = client.GetRangeFromList(this, skip, PageLimit); - foreach (var result in pageResults) - { - yield return result; - } - skip += PageLimit; - } while (pageResults.Count == PageLimit); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(T item) - { - client.AddItemToList(this, item); - } - - public void Clear() - { - client.RemoveAllFromList(this); - } - - public bool Contains(T item) - { - //TODO: replace with native implementation when exists - foreach (var existingItem in this) - { - if (Equals(existingItem, item)) return true; - } - return false; - } - - public void CopyTo(T[] array, int arrayIndex) - { - var allItemsInList = client.GetAllItemsFromList(this); - allItemsInList.CopyTo(array, arrayIndex); - } - - public bool Remove(T item) - { - return client.RemoveItemFromList(this, item) > 0; - } - - public int Count - { - get - { - return client.GetListCount(this); - } - } - - public bool IsReadOnly { get { return false; } } - - public int IndexOf(T item) - { - //TODO: replace with native implementation when exists - var i = 0; - foreach (var existingItem in this) - { - if (Equals(existingItem, item)) return i; - i++; - } - return -1; - } - - public void Insert(int index, T item) - { - //TODO: replace with implementation involving creating on new temp list then replacing - //otherwise wait for native implementation - throw new NotImplementedException(); - } - - public void RemoveAt(int index) - { - //TODO: replace with native implementation when one exists - var markForDelete = Guid.NewGuid().ToString(); - client.NativeClient.LSet(listId, index, Encoding.UTF8.GetBytes(markForDelete)); - - const int removeAll = 0; - client.NativeClient.LRem(listId, removeAll, Encoding.UTF8.GetBytes(markForDelete)); - } - - public T this[int index] - { - get { return client.GetItemFromList(this, index); } - set { client.SetItemInList(this, index, value); } - } - - public List GetAll() - { - return client.GetAllItemsFromList(this); - } - - public List GetRange(int startingFrom, int endingAt) - { - return client.GetRangeFromList(this, startingFrom, endingAt); - } - - public List GetRangeFromSortedList(int startingFrom, int endingAt) - { - return client.SortList(this, startingFrom, endingAt); - } - - public void RemoveAll() - { - client.RemoveAllFromList(this); - } - - public void Trim(int keepStartingFrom, int keepEndingAt) - { - client.TrimList(this, keepStartingFrom, keepEndingAt); - } - - public int RemoveValue(T value) - { - return client.RemoveItemFromList(this, value); - } - - public int RemoveValue(T value, int noOfMatches) - { - return client.RemoveItemFromList(this, value, noOfMatches); - } - - public void Append(T value) - { - Add(value); - } - - public void Prepend(T value) - { - 
client.PrependItemToList(this, value); - } - - public T RemoveStart() - { - return client.RemoveStartFromList(this); - } - - public T BlockingRemoveStart(TimeSpan? timeOut) - { - return client.BlockingRemoveStartFromList(this, timeOut); - } - - public T RemoveEnd() - { - return client.RemoveEndFromList(this); - } - - public void Enqueue(T value) - { - client.EnqueueItemOnList(this, value); - } - - public T Dequeue() - { - return client.DequeueItemFromList(this); - } - - public T BlockingDequeue(TimeSpan? timeOut) - { - return client.BlockingDequeueItemFromList(this, timeOut); - } - - public void Push(T value) - { - client.PushItemToList(this, value); - } - - public T Pop() - { - return client.PopItemFromList(this); - } - - public T BlockingPop(TimeSpan? timeOut) - { - return client.BlockingPopItemFromList(this, timeOut); - } - - public T PopAndPush(IRedisList toList) - { - return client.PopAndPushItemBetweenLists(this, toList); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs deleted file mode 100644 index b593a4ce..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs +++ /dev/null @@ -1,150 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections; -using System.Collections.Generic; - -namespace ServiceStack.Redis.Generic -{ - /// - /// Wrap the common redis set operations under a ICollection[string] interface. - /// - internal class RedisClientSet - : IRedisSet - { - private readonly RedisTypedClient client; - private readonly string setId; - private const int PageLimit = 1000; - - public RedisClientSet(RedisTypedClient client, string setId) - { - this.client = client; - this.setId = setId; - } - - public string Id - { - get { return this.setId; } - } - - public IEnumerator GetEnumerator() - { - return this.Count <= PageLimit - ? 
client.GetAllItemsFromSet(this).GetEnumerator() - : GetPagingEnumerator(); - } - - public IEnumerator GetPagingEnumerator() - { - var skip = 0; - List pageResults; - do - { - pageResults = client.GetSortedEntryValues(this, skip, skip + PageLimit - 1); - foreach (var result in pageResults) - { - yield return result; - } - skip += PageLimit; - } while (pageResults.Count == PageLimit); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(T item) - { - client.AddItemToSet(this, item); - } - - public void Clear() - { - client.RemoveEntry(setId); - } - - public bool Contains(T item) - { - return client.SetContainsItem(this, item); - } - - public void CopyTo(T[] array, int arrayIndex) - { - var allItemsInSet = client.GetAllItemsFromSet(this); - allItemsInSet.CopyTo(array, arrayIndex); - } - - public bool Remove(T item) - { - client.RemoveItemFromSet(this, item); - return true; - } - - public int Count - { - get - { - var setCount = client.GetSetCount(this); - return setCount; - } - } - - public bool IsReadOnly { get { return false; } } - - public List Sort(int startingFrom, int endingAt) - { - return client.GetSortedEntryValues(this, startingFrom, endingAt); - } - - public HashSet GetAll() - { - return client.GetAllItemsFromSet(this); - } - - public T PopRandomItem() - { - return client.PopItemFromSet(this); - } - - public T GetRandomItem() - { - return client.GetRandomItemFromSet(this); - } - - public void MoveTo(T item, IRedisSet toSet) - { - client.MoveBetweenSets(this, toSet, item); - } - - public void PopulateWithIntersectOf(params IRedisSet[] sets) - { - client.StoreIntersectFromSets(this, sets); - } - - public void PopulateWithUnionOf(params IRedisSet[] sets) - { - client.StoreUnionFromSets(this, sets); - } - - public void GetDifferences(params IRedisSet[] withSets) - { - client.StoreUnionFromSets(this, withSets); - } - - public void PopulateWithDifferencesOf(IRedisSet fromSet, params IRedisSet[] withSets) - { - client.StoreDifferencesFromSet(this, fromSet, withSets); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs deleted file mode 100644 index 14733858..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs +++ /dev/null @@ -1,195 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections; -using System.Collections.Generic; - -namespace ServiceStack.Redis.Generic -{ - /// - /// Wrap the common redis set operations under a ICollection[string] interface. - /// - internal class RedisClientSortedSet - : IRedisSortedSet - { - private readonly RedisTypedClient client; - private readonly string setId; - private const int PageLimit = 1000; - - public RedisClientSortedSet(RedisTypedClient client, string setId) - { - this.client = client; - this.setId = setId; - } - - public string Id - { - get { return this.setId; } - } - - public IEnumerator GetEnumerator() - { - return this.Count <= PageLimit - ? 
client.GetAllItemsFromSortedSet(this).GetEnumerator() - : GetPagingEnumerator(); - } - - public IEnumerator GetPagingEnumerator() - { - var skip = 0; - List pageResults; - do - { - pageResults = client.GetRangeFromSortedSet(this, skip, skip + PageLimit - 1); - foreach (var result in pageResults) - { - yield return result; - } - skip += PageLimit; - } while (pageResults.Count == PageLimit); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(T item) - { - client.AddItemToSortedSet(this, item); - } - - public void Add(T item, double score) - { - client.AddItemToSortedSet(this, item, score); - } - - public void Clear() - { - client.RemoveEntry(setId); - } - - public bool Contains(T item) - { - return client.SortedSetContainsItem(this, item); - } - - public void CopyTo(T[] array, int arrayIndex) - { - var allItemsInSet = client.GetAllItemsFromSortedSet(this); - allItemsInSet.CopyTo(array, arrayIndex); - } - - public bool Remove(T item) - { - client.RemoveItemFromSortedSet(this, item); - return true; - } - - public int Count - { - get - { - var setCount = client.GetSortedSetCount(this); - return setCount; - } - } - - public bool IsReadOnly { get { return false; } } - - public T PopItemWithHighestScore() - { - return client.PopItemWithHighestScoreFromSortedSet(this); - } - - public T PopItemWithLowestScore() - { - return client.PopItemWithLowestScoreFromSortedSet(this); - } - - public double IncrementItem(T item, double incrementBy) - { - return client.IncrementItemInSortedSet(this, item, incrementBy); - } - - public int IndexOf(T item) - { - return client.GetItemIndexInSortedSet(this, item); - } - - public int IndexOfDescending(T item) - { - return client.GetItemIndexInSortedSetDesc(this, item); - } - - public List GetAll() - { - return client.GetAllItemsFromSortedSet(this); - } - - public List GetAllDescending() - { - return client.GetAllItemsFromSortedSetDesc(this); - } - - public List GetRange(int fromRank, int toRank) - { - return client.GetRangeFromSortedSet(this, fromRank, toRank); - } - - public List GetRangeByLowestScore(double fromScore, double toScore) - { - return client.GetRangeFromSortedSetByLowestScore(this, fromScore, toScore); - } - - public List GetRangeByLowestScore(double fromScore, double toScore, int? skip, int? take) - { - return client.GetRangeFromSortedSetByLowestScore(this, fromScore, toScore, skip, take); - } - - public List GetRangeByHighestScore(double fromScore, double toScore) - { - return client.GetRangeFromSortedSetByHighestScore(this, fromScore, toScore); - } - - public List GetRangeByHighestScore(double fromScore, double toScore, int? skip, int? 
take) - { - return client.GetRangeFromSortedSetByHighestScore(this, fromScore, toScore, skip, take); - } - - public int RemoveRange(int minRank, int maxRank) - { - return client.RemoveRangeFromSortedSet(this, minRank, maxRank); - } - - public int RemoveRangeByScore(double fromScore, double toScore) - { - return client.RemoveRangeFromSortedSetByScore(this, fromScore, toScore); - } - - public double GetItemScore(T item) - { - return client.GetItemScoreInSortedSet(this, item); - } - - public int PopulateWithIntersectOf(params IRedisSortedSet[] setIds) - { - return client.StoreIntersectFromSortedSets(this, setIds); - } - - public int PopulateWithUnionOf(params IRedisSortedSet[] setIds) - { - return client.StoreUnionFromSortedSets(this, setIds); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient.cs deleted file mode 100644 index 213f7751..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient.cs +++ /dev/null @@ -1,384 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using ServiceStack.Common.Extensions; -using ServiceStack.Common.Utils; -using ServiceStack.DesignPatterns.Model; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Generic -{ - /// - /// Allows you to get Redis value operations to operate against POCO types. - /// - /// - internal partial class RedisTypedClient - : IRedisTypedClient - { - readonly ITypeSerializer serializer = new JsonSerializer(); - private readonly RedisClient client; - - internal IRedisNativeClient NativeClient - { - get { return client; } - } - - /// - /// Use this to share the same redis connection with another - /// - /// The client. 
- public RedisTypedClient(RedisClient client) - { - this.client = client; - this.Lists = new RedisClientLists(this); - this.Sets = new RedisClientSets(this); - this.SortedSets = new RedisClientSortedSets(this); - - this.SequenceKey = client.GetTypeSequenceKey(); - this.TypeIdsSetKey = client.GetTypeIdsSetKey(); - this.TypeLockKey = "lock:" + typeof(T).Name; - } - - public string TypeIdsSetKey { get; set; } - public string TypeLockKey { get; set; } - - public IRedisTypedTransaction CreateTransaction() - { - return new RedisTypedTransaction(this); - } - - public IDisposable AcquireLock() - { - return client.AcquireLock(this.TypeLockKey); - } - - public IDisposable AcquireLock(TimeSpan timeOut) - { - return client.AcquireLock(this.TypeLockKey, timeOut); - } - - public IRedisQueableTransaction CurrentTransaction - { - get - { - return client.CurrentTransaction; - } - set - { - client.CurrentTransaction = value; - } - } - - public void Multi() - { - this.client.Multi(); - } - - public void Discard() - { - this.client.Discard(); - } - - public int Exec() - { - return this.client.Exec(); - } - - internal void AddTypeIdsRegisteredDuringTransaction() - { - client.AddTypeIdsRegisteredDuringTransaction(); - } - - internal void ClearTypeIdsRegisteredDuringTransaction() - { - client.ClearTypeIdsRegisteredDuringTransaction(); - } - - public List GetAllKeys() - { - return client.GetAllKeys(); - } - - public T this[string key] - { - get { return GetValue(key); } - set { SetEntry(key, value); } - } - - public byte[] SerializeValue(T value) - { - var strValue = serializer.SerializeToString(value); - return Encoding.UTF8.GetBytes(strValue); - } - - public T DeserializeValue(byte[] value) - { - var strValue = value != null ? Encoding.UTF8.GetString(value) : null; - return serializer.DeserializeFromString(strValue); - } - - public void SetEntry(string key, T value) - { - if (key == null) - throw new ArgumentNullException("key"); - - client.Set(key, SerializeValue(value)); - client.RegisterTypeId(value); - } - - public void SetEntry(string key, T value, TimeSpan expireIn) - { - if (key == null) - throw new ArgumentNullException("key"); - - client.Set(key, SerializeValue(value), expireIn); - client.RegisterTypeId(value); - } - - public bool SetEntryIfNotExists(string key, T value) - { - var success = client.SetNX(key, SerializeValue(value)) == RedisNativeClient.Success; - if (success) client.RegisterTypeId(value); - return success; - } - - public T GetValue(string key) - { - return DeserializeValue(client.Get(key)); - } - - public T GetAndSetValue(string key, T value) - { - return DeserializeValue(client.GetSet(key, SerializeValue(value))); - } - - public bool ContainsKey(string key) - { - return client.Exists(key) == RedisNativeClient.Success; - } - - public bool RemoveEntry(string key) - { - return client.Del(key) == RedisNativeClient.Success; - } - - public bool RemoveEntry(params string[] keys) - { - return client.Del(keys) == RedisNativeClient.Success; - } - - public bool RemoveEntry(params IHasStringId[] entities) - { - var ids = entities.ConvertAll(x => x.Id); - var success = client.Del(ids.ToArray()) == RedisNativeClient.Success; - if (success) client.RemoveTypeIds(ids.ToArray()); - return success; - } - - public int IncrementValue(string key) - { - return client.Incr(key); - } - - public int IncrementValueBy(string key, int count) - { - return client.IncrBy(key, count); - } - - public int DecrementValue(string key) - { - return client.Decr(key); - } - - public int DecrementValueBy(string key, int count) 
- { - return client.DecrBy(key, count); - } - - public string SequenceKey { get; set; } - - public void SetSequence(int value) - { - client.GetSet(SequenceKey, Encoding.UTF8.GetBytes(value.ToString())); - } - - public int GetNextSequence() - { - return IncrementValue(SequenceKey); - } - - public RedisKeyType GetEntryType(string key) - { - return client.GetEntryType(key); - } - - public string GetRandomKey() - { - return client.RandomKey(); - } - - public bool ExpireEntryIn(string key, TimeSpan expireIn) - { - return client.Expire(key, (int)expireIn.TotalSeconds) == RedisNativeClient.Success; - } - - public bool ExpireEntryAt(string key, DateTime expireAt) - { - return client.ExpireAt(key, expireAt.ToUnixTime()) == RedisNativeClient.Success; - } - - public TimeSpan GetTimeToLive(string key) - { - return TimeSpan.FromSeconds(client.Ttl(key)); - } - - public void Save() - { - client.Save(); - } - - public void SaveAsync() - { - client.SaveAsync(); - } - - public void FlushDb() - { - client.FlushDb(); - } - - public void FlushAll() - { - client.FlushAll(); - } - - public T[] SearchKeys(string pattern) - { - var strKeys = client.SearchKeys(pattern); - var keysCount = strKeys.Count; - - var keys = new T[keysCount]; - for (var i=0; i < keysCount; i++) - { - keys[i] = serializer.DeserializeFromString(strKeys[i]); - } - return keys; - } - - public List GetValues(List keys) - { - var resultBytesArray = client.MGet(keys.ToArray()); - - var results = new List(); - foreach (var resultBytes in resultBytesArray) - { - if (resultBytes == null) continue; - - var result = DeserializeValue(resultBytes); - results.Add(result); - } - - return results; - } - - - #region Implementation of IBasicPersistenceProvider - - public T GetById(string id) - { - var key = IdUtils.CreateUrn(id); - return this.GetValue(key); - } - - public IList GetByIds(ICollection ids) - { - if (ids == null || ids.Count == 0) - return new List(); - - var urnKeys = ids.ConvertAll(x => IdUtils.CreateUrn(x)); - return GetValues(urnKeys); - } - - public IList GetAll() - { - var allKeys = client.GetAllItemsFromSet(this.TypeIdsSetKey); - return this.GetByIds(allKeys); - } - - public T Store(T entity) - { - var urnKey = entity.CreateUrn(); - this.SetEntry(urnKey, entity); - - return entity; - } - - public void StoreAll(IEnumerable entities) - { - if (entities == null) return; - - var entitiesList = entities.ToList(); - var len = entitiesList.Count; - - var keys = new byte[len][]; - var values = new byte[len][]; - - for (var i = 0; i < len; i++) - { - keys[i] = entitiesList[i].CreateUrn().ToUtf8Bytes(); - values[i] = RedisClient.SerializeToUtf8Bytes(entitiesList[i]); - } - - client.MSet(keys, values); - client.RegisterTypeIds(entitiesList); - } - - public void Delete(T entity) - { - var urnKey = entity.CreateUrn(); - this.RemoveEntry(urnKey); - client.RemoveTypeIds(entity); - } - - public void DeleteById(string id) - { - var urnKey = IdUtils.CreateUrn(id); - - this.RemoveEntry(urnKey); - client.RemoveTypeIds(id); - } - - public void DeleteByIds(ICollection ids) - { - if (ids == null) return; - - var urnKeys = ids.ConvertAll(x => IdUtils.CreateUrn(x)); - this.RemoveEntry(urnKeys.ToArray()); - client.RemoveTypeIds(ids.ToArray()); - } - - public void DeleteAll() - { - var urnKeys = client.GetAllItemsFromSet(this.TypeIdsSetKey); - this.RemoveEntry(urnKeys.ToArray()); - this.RemoveEntry(this.TypeIdsSetKey); - } - - #endregion - - public void Dispose() {} - } -} \ No newline at end of file diff --git 
a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs deleted file mode 100644 index 467dff49..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs +++ /dev/null @@ -1,92 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using System.Linq; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Generic -{ - internal partial class RedisTypedClient - { - public IRedisHash GetHash(string hashId) - { - return new RedisClientHash(this, hashId); - } - - public bool HashContainsEntry(IRedisHash hash, TKey key) - { - return client.HashContainsEntry(hash.Id, key.SerializeToString()); - } - - public bool SetEntryInHash(IRedisHash hash, TKey key, T value) - { - return client.SetEntryInHash(hash.Id, key.SerializeToString(), value.SerializeToString()); - } - - public bool SetEntryInHashIfNotExists(IRedisHash hash, TKey key, T value) - { - return client.SetEntryInHashIfNotExists(hash.Id, key.SerializeToString(), value.SerializeToString()); - } - - public void SetRangeInHash(IRedisHash hash, IEnumerable> keyValuePairs) - { - var stringKeyValuePairs = keyValuePairs.ToList().ConvertAll( - x => new KeyValuePair(x.Key.SerializeToString(), x.Value.SerializeToString())); - - client.SetRangeInHash(hash.Id, stringKeyValuePairs); - } - - public T GetValueFromHash(IRedisHash hash, TKey key) - { - return DeserializeFromString( - client.GetValueFromHash(hash.Id, key.SerializeToString())); - } - - public bool RemoveEntryFromHash(IRedisHash hash, TKey key) - { - return client.RemoveEntryFromHash(hash.Id, key.SerializeToString()); - } - - public int GetHashCount(IRedisHash hash) - { - return client.GetHashCount(hash.Id); - } - - public List GetHashKeys(IRedisHash hash) - { - return client.GetHashKeys(hash.Id).ConvertEachTo(); - } - - public List GetHashValues(IRedisHash hash) - { - return client.GetHashValues(hash.Id).ConvertEachTo(); - } - - public Dictionary GetAllEntriesFromHash(IRedisHash hash) - { - return ConvertEachTo(client.GetAllEntriesFromHash(hash.Id)); - } - - public static Dictionary ConvertEachTo(IDictionary map) - { - var to = new Dictionary(); - foreach (var item in map) - { - to[JsonSerializer.DeserializeFromString(item.Key)] - = JsonSerializer.DeserializeFromString(item.Value); - } - return to; - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_List.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_List.cs deleted file mode 100644 index 5daa322e..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_List.cs +++ /dev/null @@ -1,182 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
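The RedisTypedClient_Hash partial above serializes both the hash key and the value with the JSON serializer, so complex key types round-trip as strings, and SetRangeInHash batches several entries into a single write. An illustrative continuation of the earlier sketch:

```csharp
// Illustrative continuation, not part of the patch.
using System.Collections.Generic;

IRedisHash<int, Customer> byId = typed.GetHash<int>("urn:customers:byId");

typed.SetRangeInHash(byId, new[]
{
    new KeyValuePair<int, Customer>(1, new Customer { Id = "1", Name = "Ann" }),
    new KeyValuePair<int, Customer>(2, new Customer { Id = "2", Name = "Bob" }),
});                                                          // one batched hash write

Dictionary<int, Customer> all = typed.GetAllEntriesFromHash(byId);   // HGETALL, deserialized
```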
-// - -using System; -using System.Collections.Generic; -using System.Linq; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis.Generic -{ - internal partial class RedisTypedClient - { - const int FirstElement = 0; - const int LastElement = -1; - - public IHasNamed> Lists { get; set; } - - internal class RedisClientLists - : IHasNamed> - { - private readonly RedisTypedClient client; - - public RedisClientLists(RedisTypedClient client) - { - this.client = client; - } - - public IRedisList this[string listId] - { - get - { - return new RedisClientList(client, listId); - } - set - { - var list = this[listId]; - list.Clear(); - list.CopyTo(value.ToArray(), 0); - } - } - } - - private List CreateList(byte[][] multiDataList) - { - if (multiDataList == null) return new List(); - - var results = new List(); - foreach (var multiData in multiDataList) - { - results.Add(DeserializeValue(multiData)); - } - return results; - } - - public List GetAllItemsFromList(IRedisList fromList) - { - var multiDataList = client.LRange(fromList.Id, FirstElement, LastElement); - return CreateList(multiDataList); - } - - public List GetRangeFromList(IRedisList fromList, int startingFrom, int endingAt) - { - var multiDataList = client.LRange(fromList.Id, startingFrom, endingAt); - return CreateList(multiDataList); - } - - public List SortList(IRedisList fromList, int startingFrom, int endingAt) - { - var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; - var multiDataList = client.Sort(fromList.Id, sortOptions); - return CreateList(multiDataList); - } - - public void AddItemToList(IRedisList fromList, T value) - { - client.RPush(fromList.Id, SerializeValue(value)); - } - - public void PrependItemToList(IRedisList fromList, T value) - { - client.LPush(fromList.Id, SerializeValue(value)); - } - - public T RemoveStartFromList(IRedisList fromList) - { - return DeserializeValue(client.LPop(fromList.Id)); - } - - public T BlockingRemoveStartFromList(IRedisList fromList, TimeSpan? 
timeOut) - { - var unblockingKeyAndValue = client.BLPop(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds); - return DeserializeValue(unblockingKeyAndValue[1]); - } - - public T RemoveEndFromList(IRedisList fromList) - { - return DeserializeValue(client.RPop(fromList.Id)); - } - - public void RemoveAllFromList(IRedisList fromList) - { - client.LTrim(fromList.Id, LastElement, FirstElement); - } - - public void TrimList(IRedisList fromList, int keepStartingFrom, int keepEndingAt) - { - client.LTrim(fromList.Id, keepStartingFrom, keepEndingAt); - } - - public int RemoveItemFromList(IRedisList fromList, T value) - { - const int removeAll = 0; - return client.LRem(fromList.Id, removeAll, SerializeValue(value)); - } - - public int RemoveItemFromList(IRedisList fromList, T value, int noOfMatches) - { - return client.LRem(fromList.Id, noOfMatches, SerializeValue(value)); - } - - public int GetListCount(IRedisList fromList) - { - return client.LLen(fromList.Id); - } - - public T GetItemFromList(IRedisList fromList, int listIndex) - { - return DeserializeValue(client.LIndex(fromList.Id, listIndex)); - } - - public void SetItemInList(IRedisList toList, int listIndex, T value) - { - client.LSet(toList.Id, listIndex, SerializeValue(value)); - } - - public void EnqueueItemOnList(IRedisList fromList, T item) - { - client.LPush(fromList.Id, SerializeValue(item)); - } - - public T DequeueItemFromList(IRedisList fromList) - { - return DeserializeValue(client.LPop(fromList.Id)); - } - - public T BlockingDequeueItemFromList(IRedisList fromList, TimeSpan? timeOut) - { - var unblockingKeyAndValue = client.BLPop(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds); - return DeserializeValue(unblockingKeyAndValue[1]); - } - - public void PushItemToList(IRedisList fromList, T item) - { - client.RPush(fromList.Id, SerializeValue(item)); - } - - public T PopItemFromList(IRedisList fromList) - { - return DeserializeValue(client.RPop(fromList.Id)); - } - - public T BlockingPopItemFromList(IRedisList fromList, TimeSpan? timeOut) - { - var unblockingKeyAndValue = client.BRPop(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds); - return DeserializeValue(unblockingKeyAndValue[1]); - } - - public T PopAndPushItemBetweenLists(IRedisList fromList, IRedisList toList) - { - return DeserializeValue(client.RPopLPush(fromList.Id, toList.Id)); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs deleted file mode 100644 index 7a098eb4..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs +++ /dev/null @@ -1,147 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
-// - -using System.Collections.Generic; -using System.Linq; -using ServiceStack.Common.Extensions; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis.Generic -{ - internal partial class RedisTypedClient - { - public IHasNamed> Sets { get; set; } - - public int Db - { - get { return client.Db; } - set { client.Db = value; } - } - - internal class RedisClientSets - : IHasNamed> - { - private readonly RedisTypedClient client; - - public RedisClientSets(RedisTypedClient client) - { - this.client = client; - } - - public IRedisSet this[string setId] - { - get - { - return new RedisClientSet(client, setId); - } - set - { - var col = this[setId]; - col.Clear(); - col.CopyTo(value.ToArray(), 0); - } - } - } - - private HashSet CreateHashSet(byte[][] multiDataList) - { - var results = new HashSet(); - foreach (var multiData in multiDataList) - { - results.Add(DeserializeValue(multiData)); - } - return results; - } - - public List GetSortedEntryValues(IRedisSet fromSet, int startingFrom, int endingAt) - { - var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; - var multiDataList = client.Sort(fromSet.Id, sortOptions); - return CreateList(multiDataList); - } - - public HashSet GetAllItemsFromSet(IRedisSet fromSet) - { - var multiDataList = client.SMembers(fromSet.Id); - return CreateHashSet(multiDataList); - } - - public void AddItemToSet(IRedisSet toSet, T item) - { - client.SAdd(toSet.Id, SerializeValue(item)); - } - - public void RemoveItemFromSet(IRedisSet fromSet, T item) - { - client.SRem(fromSet.Id, SerializeValue(item)); - } - - public T PopItemFromSet(IRedisSet fromSet) - { - return DeserializeValue(client.SPop(fromSet.Id)); - } - - public void MoveBetweenSets(IRedisSet fromSet, IRedisSet toSet, T item) - { - client.SMove(fromSet.Id, toSet.Id, SerializeValue(item)); - } - - public int GetSetCount(IRedisSet set) - { - return client.SCard(set.Id); - } - - public bool SetContainsItem(IRedisSet set, T item) - { - return client.SIsMember(set.Id, SerializeValue(item)) == 1; - } - - public HashSet GetIntersectFromSets(params IRedisSet[] sets) - { - var multiDataList = client.SInter(sets.ConvertAll(x => x.Id).ToArray()); - return CreateHashSet(multiDataList); - } - - public void StoreIntersectFromSets(IRedisSet intoSet, params IRedisSet[] sets) - { - client.SInterStore(intoSet.Id, sets.ConvertAll(x => x.Id).ToArray()); - } - - public HashSet GetUnionFromSets(params IRedisSet[] sets) - { - var multiDataList = client.SUnion(sets.ConvertAll(x => x.Id).ToArray()); - return CreateHashSet(multiDataList); - } - - public void StoreUnionFromSets(IRedisSet intoSet, params IRedisSet[] sets) - { - client.SUnionStore(intoSet.Id, sets.ConvertAll(x => x.Id).ToArray()); - } - - public HashSet GetDifferencesFromSet(IRedisSet fromSet, params IRedisSet[] withSets) - { - var multiDataList = client.SDiff(fromSet.Id, withSets.ConvertAll(x => x.Id).ToArray()); - return CreateHashSet(multiDataList); - } - - public void StoreDifferencesFromSet(IRedisSet intoSet, IRedisSet fromSet, params IRedisSet[] withSets) - { - client.SDiffStore(intoSet.Id, fromSet.Id, withSets.ConvertAll(x => x.Id).ToArray()); - } - - public T GetRandomItemFromSet(IRedisSet fromSet) - { - return DeserializeValue(client.SRandMember(fromSet.Id)); - } - - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs deleted file mode 100644 index 
a05ffa4f..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs +++ /dev/null @@ -1,283 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using System.Linq; -using ServiceStack.Common.Utils; -using ServiceStack.DesignPatterns.Model; -using ServiceStack.Redis.Support; -using ServiceStack.Text; -using ServiceStack.Common.Extensions; - -namespace ServiceStack.Redis.Generic -{ - internal partial class RedisTypedClient - { - public IHasNamed> SortedSets { get; set; } - - internal class RedisClientSortedSets - : IHasNamed> - { - private readonly RedisTypedClient client; - - public RedisClientSortedSets(RedisTypedClient client) - { - this.client = client; - } - - public IRedisSortedSet this[string setId] - { - get - { - return new RedisClientSortedSet(client, setId); - } - set - { - var col = this[setId]; - col.Clear(); - col.CopyTo(value.ToArray(), 0); - } - } - } - - public static T DeserializeFromString(string serializedObj) - { - return JsonSerializer.DeserializeFromString(serializedObj); - } - - private static IDictionary CreateGenericMap(IDictionary map) - { - var genericMap = new OrderedDictionary(); - foreach (var entry in map) - { - genericMap[DeserializeFromString(entry.Key)] = entry.Value; - } - return genericMap; - } - - public void AddItemToSortedSet(IRedisSortedSet toSet, T value) - { - client.AddItemToSortedSet(toSet.Id, value.SerializeToString()); - } - - public void AddItemToSortedSet(IRedisSortedSet toSet, T value, double score) - { - client.AddItemToSortedSet(toSet.Id, value.SerializeToString(), score); - } - - public bool RemoveItemFromSortedSet(IRedisSortedSet fromSet, T value) - { - return client.RemoveItemFromSortedSet(fromSet.Id, value.SerializeToString()); - } - - public T PopItemWithLowestScoreFromSortedSet(IRedisSortedSet fromSet) - { - return DeserializeFromString( - client.PopItemWithLowestScoreFromSortedSet(fromSet.Id)); - } - - public T PopItemWithHighestScoreFromSortedSet(IRedisSortedSet fromSet) - { - return DeserializeFromString( - client.PopItemWithHighestScoreFromSortedSet(fromSet.Id)); - } - - public bool SortedSetContainsItem(IRedisSortedSet set, T value) - { - return client.SortedSetContainsItem(set.Id, value.SerializeToString()); - } - - public double IncrementItemInSortedSet(IRedisSortedSet set, T value, double incrementBy) - { - return client.IncrementItemInSortedSet(set.Id, value.SerializeToString(), incrementBy); - } - - public int GetItemIndexInSortedSet(IRedisSortedSet set, T value) - { - return client.GetItemIndexInSortedSet(set.Id, value.SerializeToString()); - } - - public int GetItemIndexInSortedSetDesc(IRedisSortedSet set, T value) - { - return client.GetItemIndexInSortedSetDesc(set.Id, value.SerializeToString()); - } - - public List GetAllItemsFromSortedSet(IRedisSortedSet set) - { - var list = client.GetAllItemsFromSortedSet(set.Id); - return list.ConvertEachTo(); - } - - public List GetAllItemsFromSortedSetDesc(IRedisSortedSet set) - { - var list = client.GetAllItemsFromSortedSetDesc(set.Id); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSet(IRedisSortedSet set, int fromRank, int toRank) - { - var list = client.GetRangeFromSortedSet(set.Id, 
fromRank, toRank); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetDesc(IRedisSortedSet set, int fromRank, int toRank) - { - var list = client.GetRangeFromSortedSetDesc(set.Id, fromRank, toRank); - return list.ConvertEachTo(); - } - - public IDictionary GetAllWithScoresFromSortedSet(IRedisSortedSet set) - { - var map = client.GetRangeWithScoresFromSortedSet(set.Id, FirstElement, LastElement); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSet(IRedisSortedSet set, int fromRank, int toRank) - { - var map = client.GetRangeWithScoresFromSortedSet(set.Id, fromRank, toRank); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetDesc(IRedisSortedSet set, int fromRank, int toRank) - { - var map = client.GetRangeWithScoresFromSortedSetDesc(set.Id, fromRank, toRank); - return CreateGenericMap(map); - } - - public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) - { - var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) - { - var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore, skip, take); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore) - { - var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromScore, toScore); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take) - { - var list = client.GetRangeFromSortedSetByLowestScore(set.Id, fromScore, toScore, skip, take); - return list.ConvertEachTo(); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) - { - var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) - { - var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromStringScore, toStringScore, skip, take); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore) - { - var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromScore, toScore); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take) - { - var map = client.GetRangeWithScoresFromSortedSetByLowestScore(set.Id, fromScore, toScore, skip, take); - return CreateGenericMap(map); - } - - public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) - { - var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? 
take) - { - var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore, skip, take); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore) - { - var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromScore, toScore); - return list.ConvertEachTo(); - } - - public List GetRangeFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take) - { - var list = client.GetRangeFromSortedSetByHighestScore(set.Id, fromScore, toScore, take, skip); - return list.ConvertEachTo(); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore) - { - var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, string fromStringScore, string toStringScore, int? skip, int? take) - { - var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromStringScore, toStringScore, skip, take); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore) - { - var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromScore, toScore); - return CreateGenericMap(map); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(IRedisSortedSet set, double fromScore, double toScore, int? skip, int? take) - { - var map = client.GetRangeWithScoresFromSortedSetByHighestScore(set.Id, fromScore, toScore, skip, take); - return CreateGenericMap(map); - } - - public int RemoveRangeFromSortedSet(IRedisSortedSet set, int minRank, int maxRank) - { - return client.RemoveRangeFromSortedSet(set.Id, maxRank, maxRank); - } - - public int RemoveRangeFromSortedSetByScore(IRedisSortedSet set, double fromScore, double toScore) - { - return client.RemoveRangeFromSortedSetByScore(set.Id, fromScore, toScore); - } - - public int GetSortedSetCount(IRedisSortedSet set) - { - return client.GetSortedSetCount(set.Id); - } - - public double GetItemScoreInSortedSet(IRedisSortedSet set, T value) - { - return client.GetItemScoreInSortedSet(set.Id, value.SerializeToString()); - } - - public int StoreIntersectFromSortedSets(IRedisSortedSet intoSetId, params IRedisSortedSet[] setIds) - { - return client.StoreIntersectFromSortedSets(intoSetId.Id, setIds.ConvertAll(x => x.Id).ToArray()); - } - - public int StoreUnionFromSortedSets(IRedisSortedSet intoSetId, params IRedisSortedSet[] setIds) - { - return client.StoreUnionFromSortedSets(intoSetId.Id, setIds.ConvertAll(x => x.Id).ToArray()); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedTransaction.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedTransaction.cs deleted file mode 100644 index 4e51a01b..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Generic/RedisTypedTransaction.cs +++ /dev/null @@ -1,367 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
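Two apparent slips in the RedisTypedClient_SortedSet partial above are worth flagging: RemoveRangeFromSortedSet forwards maxRank for both bounds instead of (minRank, maxRank), and the double-score overload of GetRangeFromSortedSetByHighestScore passes take and skip in swapped order, so callers of those two members through this copy would get the wrong ranges. The score-range queries it exposes are otherwise straightforward; an illustrative continuation of the earlier sketch:

```csharp
// Illustrative continuation, not part of the patch.
using System.Collections.Generic;

IRedisSortedSet<Customer> leaders = typed.SortedSets["urn:customers:leaders"];

// Members with a score between 10 and 50 (ZRANGEBYSCORE), paged
List<Customer> page = typed.GetRangeFromSortedSetByLowestScore(leaders, 10, 50, skip: 0, take: 10);

// Same range with scores attached
IDictionary<Customer, double> withScores =
    typed.GetRangeWithScoresFromSortedSetByLowestScore(leaders, 10, 50);
```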
-// - -using System; -using System.Collections.Generic; -using System.Text; -using ServiceStack.Logging; -using ServiceStack.Text; - -namespace ServiceStack.Redis.Generic -{ - /// - /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). - /// - internal class RedisTypedTransaction - : IRedisTypedTransaction, IRedisQueableTransaction - { - private readonly List queuedCommands = new List(); - - private readonly RedisTypedClient redisClient; - private QueuedRedisOperation currentQueuedOperation; - - public RedisTypedTransaction(RedisTypedClient redisClient) - { - this.redisClient = redisClient; - - if (redisClient.CurrentTransaction != null) - throw new InvalidOperationException("An atomic command is already in use"); - - redisClient.CurrentTransaction = this; - redisClient.Multi(); - } - - private void BeginQueuedCommand(QueuedRedisOperation queuedRedisOperation) - { - if (currentQueuedOperation != null) - throw new InvalidOperationException("The previous queued operation has not been commited"); - - currentQueuedOperation = queuedRedisOperation; - } - - private void AssertCurrentOperation() - { - if (currentQueuedOperation == null) - throw new InvalidOperationException("No queued operation is currently set"); - } - - private void AddCurrentQueuedOperation() - { - this.queuedCommands.Add(currentQueuedOperation); - currentQueuedOperation = null; - } - - public void CompleteVoidQueuedCommand(Action voidReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.VoidReadCommand = voidReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteIntQueuedCommand(Func intReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.IntReadCommand = intReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteBytesQueuedCommand(Func bytesReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.BytesReadCommand = bytesReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteMultiBytesQueuedCommand(Func multiBytesReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.MultiBytesReadCommand = multiBytesReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteStringQueuedCommand(Func stringReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.StringReadCommand = stringReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteMultiStringQueuedCommand(Func> multiStringReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.MultiStringReadCommand = multiStringReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteDoubleQueuedCommand(Func doubleReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.DoubleReadCommand = doubleReadCommand; - AddCurrentQueuedOperation(); - } - - - public void QueueCommand(Action> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Action> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Action> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessVoidCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func, int> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, int> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void 
QueueCommand(Func, int> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessIntCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func, bool> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, bool> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, bool> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessBoolCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func, double> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, double> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, double> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessDoubleCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func, byte[]> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, byte[]> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, byte[]> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessBytesCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func, string> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, string> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, string> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessStringCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - public void QueueCommand(Func, T> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, T> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, T> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessTypeCallback = x => onSuccessCallback(JsonSerializer.DeserializeFromString(x)), - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func, byte[][]> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, byte[][]> command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, byte[][]> command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessMultiBytesCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func, List> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, List> command, Action> onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); 
- } - - public void QueueCommand(Func, List> command, Action> onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessMultiStringCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - public void QueueCommand(Func, List> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func, List> command, Action> onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func, List> command, Action> onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessMultiTypeCallback = x => onSuccessCallback(x.ConvertAll(y => JsonSerializer.DeserializeFromString(y))), - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void Commit() - { - try - { - var resultCount = redisClient.Exec(); - if (resultCount != queuedCommands.Count) - throw new InvalidOperationException(string.Format( - "Invalid results received from 'EXEC', expected '{0}' received '{1}'" - + "\nWarning: Transaction was committed", - queuedCommands.Count, resultCount)); - - foreach (var queuedCommand in queuedCommands) - { - queuedCommand.ProcessResult(); - } - } - finally - { - redisClient.CurrentTransaction = null; - redisClient.AddTypeIdsRegisteredDuringTransaction(); - } - } - - public void Rollback() - { - if (redisClient.CurrentTransaction == null) - throw new InvalidOperationException("There is no current transaction to Rollback"); - - redisClient.CurrentTransaction = null; - redisClient.ClearTypeIdsRegisteredDuringTransaction(); - redisClient.Discard(); - } - - public void Dispose() - { - if (redisClient.CurrentTransaction == null) return; - Rollback(); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClient.cs deleted file mode 100644 index 65422896..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClient.cs +++ /dev/null @@ -1,218 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
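RedisTypedTransaction above wraps MULTI/EXEC/DISCARD: CreateTransaction() issues MULTI, each QueueCommand overload records a success/error callback for the queued reply, Commit() issues EXEC and dispatches each reply to its callback, and an uncommitted transaction is discarded via Rollback (which Dispose calls when needed). A minimal illustrative continuation of the earlier sketch:

```csharp
// Illustrative continuation, not part of the patch.
int incrementedTo = 0;
Customer fetched = null;

var trans = typed.CreateTransaction();          // issues MULTI

trans.QueueCommand(r => r.IncrementValue("urn:customer:count"), i => incrementedTo = i);
trans.QueueCommand(r => r.GetValue("urn:customer:1"),           c => fetched = c);

trans.Commit();                                 // EXEC; each callback then receives its reply
```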
-// - -using System; -using System.Collections.Generic; -using ServiceStack.CacheAccess; -using ServiceStack.DataAccess; -using ServiceStack.DesignPatterns.Model; -using ServiceStack.Redis.Generic; - -namespace ServiceStack.Redis -{ - public interface IRedisClient - : IBasicPersistenceProvider, ICacheClient - { - //Basic Redis Connection operations - int Db { get; set; } - int DbSize { get; } - Dictionary Info { get; } - DateTime LastSave { get; } - string Host { get; } - int Port { get; } - int RetryTimeout { get; set; } - int RetryCount { get; set; } - int SendTimeout { get; set; } - string Password { get; set; } - bool HadExceptions { get; } - - void Save(); - void SaveAsync(); - void Shutdown(); - void RewriteAppendOnlyFileAsync(); - void FlushDb(); - - //Basic Redis Connection Info - string this[string key] { get; set; } - - List GetAllKeys(); - void SetEntry(string key, string value); - void SetEntry(string key, string value, TimeSpan expireIn); - bool SetEntryIfNotExists(string key, string value); - string GetValue(string key); - string GetAndSetEntry(string key, string value); - List GetValues(List keys); - List GetValues(List keys); - Dictionary GetValuesMap(List keys); - Dictionary GetValuesMap(List keys); - int AppendToValue(string key, string value); - string GetSubstring(string key, int fromIndex, int toIndex); - - bool ContainsKey(string key); - bool RemoveEntry(params string[] args); - int IncrementValue(string key); - int IncrementValueBy(string key, int count); - int DecrementValue(string key); - int DecrementValueBy(string key, int count); - List SearchKeys(string pattern); - - RedisKeyType GetEntryType(string key); - string GetRandomKey(); - bool ExpireEntryIn(string key, TimeSpan expireIn); - bool ExpireEntryAt(string key, DateTime expireAt); - TimeSpan GetTimeToLive(string key); - List GetSortedEntryValues(string key, int startingFrom, int endingAt); - - //Store entities without registering entity ids - void WriteAll(IEnumerable entities); - - //Useful high-level abstractions - IRedisTypedClient GetTypedClient(); - - IHasNamed Lists { get; set; } - IHasNamed Sets { get; set; } - IHasNamed SortedSets { get; set; } - IHasNamed Hashes { get; set; } - - IRedisTransaction CreateTransaction(); - - IDisposable AcquireLock(string key); - IDisposable AcquireLock(string key, TimeSpan timeOut); - - #region Redis pubsub - - IRedisSubscription CreateSubscription(); - int PublishMessage(string toChannel, string message); - - #endregion - - - #region Set operations - - HashSet GetAllItemsFromSet(string setId); - void AddItemToSet(string setId, string item); - void AddRangeToSet(string setId, List items); - void RemoveItemFromSet(string setId, string item); - string PopItemFromSet(string setId); - void MoveBetweenSets(string fromSetId, string toSetId, string item); - int GetSetCount(string setId); - bool SetContainsItem(string setId, string item); - HashSet GetIntersectFromSets(params string[] setIds); - void StoreIntersectFromSets(string intoSetId, params string[] setIds); - HashSet GetUnionFromSets(params string[] setIds); - void StoreUnionFromSets(string intoSetId, params string[] setIds); - HashSet GetDifferencesFromSet(string fromSetId, params string[] withSetIds); - void StoreDifferencesFromSet(string intoSetId, string fromSetId, params string[] withSetIds); - string GetRandomItemFromSet(string setId); - - #endregion - - - #region List operations - - List GetAllItemsFromList(string listId); - List GetRangeFromList(string listId, int startingFrom, int endingAt); - List 
GetRangeFromSortedList(string listId, int startingFrom, int endingAt); - void AddItemToList(string listId, string value); - void AddRangeToList(string listId, List values); - void PrependItemToList(string listId, string value); - void PrependRangeToList(string listId, List values); - - void RemoveAllFromList(string listId); - string RemoveStartFromList(string listId); - string BlockingRemoveStartFromList(string listId, TimeSpan? timeOut); - string RemoveEndFromList(string listId); - void TrimList(string listId, int keepStartingFrom, int keepEndingAt); - int RemoveItemFromList(string listId, string value); - int RemoveItemFromList(string listId, string value, int noOfMatches); - int GetListCount(string listId); - string GetItemFromList(string listId, int listIndex); - void SetItemInList(string listId, int listIndex, string value); - - //Queue operations - void EnqueueItemOnList(string listId, string value); - string DequeueItemFromList(string listId); - string BlockingDequeueItemFromList(string listId, TimeSpan? timeOut); - - //Stack operations - void PushItemToList(string listId, string value); - string PopItemFromList(string listId); - string BlockingPopItemFromList(string listId, TimeSpan? timeOut); - string PopAndPushItemBetweenLists(string fromListId, string toListId); - - #endregion - - - #region Sorted Set operations - - bool AddItemToSortedSet(string setId, string value); - bool AddItemToSortedSet(string setId, string value, double score); - bool AddRangeToSortedSet(string setId, List values, double score); - bool RemoveItemFromSortedSet(string setId, string value); - string PopItemWithLowestScoreFromSortedSet(string setId); - string PopItemWithHighestScoreFromSortedSet(string setId); - bool SortedSetContainsItem(string setId, string value); - double IncrementItemInSortedSet(string setId, string value, double incrementBy); - int GetItemIndexInSortedSet(string setId, string value); - int GetItemIndexInSortedSetDesc(string setId, string value); - List GetAllItemsFromSortedSet(string setId); - List GetAllItemsFromSortedSetDesc(string setId); - List GetRangeFromSortedSet(string setId, int fromRank, int toRank); - List GetRangeFromSortedSetDesc(string setId, int fromRank, int toRank); - IDictionary GetAllWithScoresFromSortedSet(string setId); - IDictionary GetRangeWithScoresFromSortedSet(string setId, int fromRank, int toRank); - IDictionary GetRangeWithScoresFromSortedSetDesc(string setId, int fromRank, int toRank); - List GetRangeFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore); - List GetRangeFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take); - List GetRangeFromSortedSetByLowestScore(string setId, double fromScore, double toScore); - List GetRangeFromSortedSetByLowestScore(string setId, double fromScore, double toScore, int? skip, int? take); - IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore); - IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take); - IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, double fromScore, double toScore); - IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, double fromScore, double toScore, int? skip, int? 
take); - List GetRangeFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore); - List GetRangeFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take); - List GetRangeFromSortedSetByHighestScore(string setId, double fromScore, double toScore); - List GetRangeFromSortedSetByHighestScore(string setId, double fromScore, double toScore, int? skip, int? take); - IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore); - IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take); - IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, double fromScore, double toScore); - IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, double fromScore, double toScore, int? skip, int? take); - int RemoveRangeFromSortedSet(string setId, int minRank, int maxRank); - int RemoveRangeFromSortedSetByScore(string setId, double fromScore, double toScore); - int GetSortedSetCount(string setId); - double GetItemScoreInSortedSet(string setId, string value); - int StoreIntersectFromSortedSets(string intoSetId, params string[] setIds); - int StoreUnionFromSortedSets(string intoSetId, params string[] setIds); - - #endregion - - - #region Hash operations - - bool HashContainsEntry(string hashId, string key); - bool SetEntryInHash(string hashId, string key, string value); - bool SetEntryInHashIfNotExists(string hashId, string key, string value); - void SetRangeInHash(string hashId, IEnumerable> keyValuePairs); - int IncrementValueInHash(string hashId, string key, int incrementBy); - string GetValueFromHash(string hashId, string key); - List GetValuesFromHash(string hashId, params string[] keys); - bool RemoveEntryFromHash(string hashId, string key); - int GetHashCount(string hashId); - List GetHashKeys(string hashId); - List GetHashValues(string hashId); - Dictionary GetAllEntriesFromHash(string hashId); - - #endregion - - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClientCacheManager.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClientCacheManager.cs deleted file mode 100644 index 5ac9a742..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClientCacheManager.cs +++ /dev/null @@ -1,45 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using ServiceStack.CacheAccess; - -namespace ServiceStack.Redis -{ - public interface IRedisClientCacheManager - : IDisposable - { - /// - /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - /// - /// - IRedisClient GetClient(); - - /// - /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - /// - /// - IRedisClient GetReadOnlyClient(); - - /// - /// Returns a Read/Write ICacheClient (The default) using the hosts defined in ReadWriteHosts - /// - /// - ICacheClient GetCacheClient(); - - /// - /// Returns a ReadOnly ICacheClient using the hosts defined in ReadOnlyHosts. 
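// A minimal usage sketch of the IRedisClient surface above, assuming a Redis
// server on localhost:6379; key, list and set names are placeholders.
using (IRedisClient redis = new RedisClient("localhost", 6379))
{
    redis.SetEntry("greeting", "hello");                    // SET
    var value = redis.GetValue("greeting");                 // GET

    redis.AddItemToList("recent", "first");                 // RPUSH
    redis.PrependItemToList("recent", "zeroth");            // LPUSH
    var recent = redis.GetAllItemsFromList("recent");       // LRANGE 0 -1

    redis.AddItemToSet("tags", "redis");                    // SADD
    var hasTag = redis.SetContainsItem("tags", "redis");    // SISMEMBER
}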
- /// - /// - ICacheClient GetReadOnlyCacheClient(); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClientFactory.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClientFactory.cs deleted file mode 100644 index db3a1b34..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClientFactory.cs +++ /dev/null @@ -1,21 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System.Net; - -namespace ServiceStack.Redis -{ - public interface IRedisClientFactory - { - RedisClient CreateRedisClient(string host, int port); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClientsManager.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClientsManager.cs deleted file mode 100644 index 2848dc32..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisClientsManager.cs +++ /dev/null @@ -1,44 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using ServiceStack.CacheAccess; - -namespace ServiceStack.Redis -{ - public interface IRedisClientsManager : IDisposable - { - /// - /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - /// - /// - IRedisClient GetClient(); - - /// - /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - /// - /// - IRedisClient GetReadOnlyClient(); - - /// - /// Returns a Read/Write ICacheClient (The default) using the hosts defined in ReadWriteHosts - /// - /// - ICacheClient GetCacheClient(); - - /// - /// Returns a ReadOnly ICacheClient using the hosts defined in ReadOnlyHosts. - /// - /// - ICacheClient GetReadOnlyCacheClient(); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisHash.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisHash.cs deleted file mode 100644 index e9ea41f1..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisHash.cs +++ /dev/null @@ -1,13 +0,0 @@ -using System.Collections.Generic; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis -{ - public interface IRedisHash - : IDictionary, IHasStringId - { - bool AddIfNotExists(KeyValuePair item); - void AddRange(IEnumerable> items); - int IncrementValue(string key, int incrementBy); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisList.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisList.cs deleted file mode 100644 index 74fd7b77..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisList.cs +++ /dev/null @@ -1,45 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. 
-// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis -{ - public interface IRedisList - : IList, IHasStringId - { - List GetAll(); - List GetRange(int startingFrom, int endingAt); - List GetRangeFromSortedList(int startingFrom, int endingAt); - void RemoveAll(); - void Trim(int keepStartingFrom, int keepEndingAt); - int RemoveValue(string value); - int RemoveValue(string value, int noOfMatches); - - void Prepend(string value); - void Append(string value); - string RemoveStart(); - string BlockingRemoveStart(TimeSpan? timeOut); - string RemoveEnd(); - - void Enqueue(string value); - string Dequeue(); - string BlockingDequeue(TimeSpan? timeOut); - - void Push(string value); - string Pop(); - string BlockingPop(TimeSpan? timeOut); - string PopAndPush(IRedisList toList); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisNativeClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisNativeClient.cs deleted file mode 100644 index b4eac503..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisNativeClient.cs +++ /dev/null @@ -1,134 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using ServiceStack.Common.Extensions; - -namespace ServiceStack.Redis -{ - public interface IRedisNativeClient - : IDisposable - { - //Redis utility operations - Dictionary Info { get; } - int Db { get; set; } - void Save(); - void BgSave(); - void Shutdown(); - void BgRewriteAof(); - void Quit(); - void FlushDb(); - void FlushAll(); - bool Ping(); - string Echo(string text); - void SlaveOf(string hostname, int port); - void SlaveOfNoOne(); - - //Common key-value Redis operations - void Set(string key, byte[] value); - void SetEx(string key, int expireInSeconds, byte[] value); - int SetNX(string key, byte[] value); - byte[] Get(string key); - byte[] GetSet(string key, byte[] value); - int Del(string key); - int Incr(string key); - int IncrBy(string key, int count); - int Decr(string key); - int DecrBy(string key, int count); - int Append(string key, byte[] value); - byte[] Substr(string key, int fromIndex, int toIndex); - - string RandomKey(); - void Rename(string oldKeyname, string newKeyname); - int Expire(string key, int seconds); - int ExpireAt(string key, long unixTime); - int Ttl(string key); - - //Redis Sort operation (works on lists, sets or hashes) - byte[][] Sort(string listOrSetId, SortOptions sortOptions); - - //Redis List operations - byte[][] LRange(string listId, int startingFrom, int endingAt); - int RPush(string listId, byte[] value); - int LPush(string listId, byte[] value); - void LTrim(string listId, int keepStartingFrom, int keepEndingAt); - int LRem(string listId, int removeNoOfMatches, byte[] value); - int LLen(string listId); - byte[] LIndex(string listId, int listIndex); - void LSet(string listId, int listIndex, byte[] value); - byte[] LPop(string listId); - byte[] RPop(string listId); - byte[][] BLPop(string listId, int timeOutSecs); - byte[][] BRPop(string listId, int timeOutSecs); - byte[] RPopLPush(string fromListId, 
string toListId); - - - //Redis Set operations - byte[][] SMembers(string setId); - int SAdd(string setId, byte[] value); - void SRem(string setId, byte[] value); - byte[] SPop(string setId); - void SMove(string fromSetId, string toSetId, byte[] value); - int SCard(string setId); - int SIsMember(string setId, byte[] value); - byte[][] SInter(params string[] setIds); - void SInterStore(string intoSetId, params string[] setIds); - byte[][] SUnion(params string[] setIds); - void SUnionStore(string intoSetId, params string[] setIds); - byte[][] SDiff(string fromSetId, params string[] withSetIds); - void SDiffStore(string intoSetId, string fromSetId, params string[] withSetIds); - byte[] SRandMember(string setId); - - - //Redis Sorted Set operations - int ZAdd(string setId, double score, byte[] value); - int ZRem(string setId, byte[] value); - double ZIncrBy(string setId, double incrBy, byte[] value); - int ZRank(string setId, byte[] value); - int ZRevRank(string setId, byte[] value); - byte[][] ZRange(string setId, int min, int max); - byte[][] ZRangeWithScores(string setId, int min, int max); - byte[][] ZRevRange(string setId, int min, int max); - byte[][] ZRevRangeWithScores(string setId, int min, int max); - byte[][] ZRangeByScore(string setId, double min, double max, int? skip, int? take); - byte[][] ZRangeByScoreWithScores(string setId, double min, double max, int? skip, int? take); - byte[][] ZRevRangeByScore(string setId, double min, double max, int? skip, int? take); - byte[][] ZRevRangeByScoreWithScores(string setId, double min, double max, int? skip, int? take); - int ZRemRangeByRank(string setId, int min, int max); - int ZRemRangeByScore(string setId, double fromScore, double toScore); - int ZCard(string setId); - double ZScore(string setId, byte[] value); - int ZUnionStore(string intoSetId, params string[] setIds); - int ZInterStore(string intoSetId, params string[] setIds); - - //Redis Hash operations - int HSet(string hashId, byte[] key, byte[] value); - int HSetNX(string hashId, byte[] key, byte[] value); - void HMSet(string hashId, byte[][] keys, byte[][] values); - int HIncrby(string hashId, byte[] key, int incrementBy); - byte[] HGet(string hashId, byte[] key); - int HDel(string hashId, byte[] key); - int HExists(string hashId, byte[] key); - int HLen(string hashId); - byte[][] HKeys(string hashId); - byte[][] HVals(string hashId); - byte[][] HGetAll(string hashId); - - //Redis Pub/Sub operations - int Publish(string toChannel, byte[] message); - byte[][] Subscribe(params string[] toChannels); - byte[][] UnSubscribe(params string[] toChannels); - byte[][] PSubscribe(params string[] toChannelsMatchingPatterns); - byte[][] PUnSubscribe(params string[] toChannelsMatchingPatterns); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisQueableTransaction.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisQueableTransaction.cs deleted file mode 100644 index 76872781..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisQueableTransaction.cs +++ /dev/null @@ -1,16 +0,0 @@ -using System; -using System.Collections.Generic; - -namespace ServiceStack.Redis -{ - public interface IRedisQueableTransaction - { - void CompleteVoidQueuedCommand(Action voidReadCommand); - void CompleteIntQueuedCommand(Func intReadCommand); - void CompleteBytesQueuedCommand(Func bytesReadCommand); - void CompleteMultiBytesQueuedCommand(Func multiBytesReadCommand); - void CompleteStringQueuedCommand(Func stringReadCommand); - void 
CompleteMultiStringQueuedCommand(Func> multiStringReadCommand); - void CompleteDoubleQueuedCommand(Func doubleReadCommand); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisSet.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisSet.cs deleted file mode 100644 index 86b06e4c..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisSet.cs +++ /dev/null @@ -1,33 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System.Collections.Generic; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis -{ - public interface IRedisSet - : ICollection, IHasStringId - { - List GetRangeFromSortedSet(int startingFrom, int endingAt); - HashSet GetAll(); - string Pop(); - void Move(string value, IRedisSet toSet); - HashSet Intersect(params IRedisSet[] withSets); - void StoreIntersect(params IRedisSet[] withSets); - HashSet Union(params IRedisSet[] withSets); - void StoreUnion(params IRedisSet[] withSets); - HashSet Diff(IRedisSet[] withSets); - void StoreDiff(IRedisSet fromSet, params IRedisSet[] withSets); - string GetRandomEntry(); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisSortedSet.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisSortedSet.cs deleted file mode 100644 index b226a3df..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisSortedSet.cs +++ /dev/null @@ -1,37 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System.Collections.Generic; -using ServiceStack.DesignPatterns.Model; - -namespace ServiceStack.Redis -{ - public interface IRedisSortedSet - : ICollection, IHasStringId - { - List GetAll(); - List GetRange(int startingRank, int endingRank); - List GetRangeByScore(string fromStringScore, string toStringScore); - List GetRangeByScore(string fromStringScore, string toStringScore, int? skip, int? take); - List GetRangeByScore(double fromScore, double toScore); - List GetRangeByScore(double fromScore, double toScore, int? skip, int? 
take); - void RemoveRange(int fromRank, int toRank); - void RemoveRangeByScore(double fromScore, double toScore); - void StoreFromIntersect(params IRedisSortedSet[] ofSets); - void StoreFromUnion(params IRedisSortedSet[] ofSets); - int GetItemIndex(string value); - double GetItemScore(string value); - void IncrementItemScore(string value, double incrementByScore); - string PopItemWithHighestScore(); - string PopItemWithLowestScore(); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisSubscription.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisSubscription.cs deleted file mode 100644 index cfa57ffa..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisSubscription.cs +++ /dev/null @@ -1,44 +0,0 @@ -using System; - -namespace ServiceStack.Redis -{ - public interface IRedisSubscription - : IDisposable - { - /// - /// The number of active subscriptions this client has - /// - int SubscriptionCount { get; } - - /// - /// Registered handler called after client *Subscribes* to each new channel - /// - Action OnSubscribe { get; set; } - - /// - /// Registered handler called when each message is received - /// - Action OnMessage { get; set; } - - /// - /// Registered handler called when each channel is unsubscribed - /// - Action OnUnSubscribe { get; set; } - - /// - /// Subscribe to channels by name - /// - /// - void SubscribeToChannels(params string[] channels); - - /// - /// Subscribe to channels matching the supplied patterns - /// - /// - void SubscribeToChannelsMatching(params string[] patterns); - - void UnSubscribeFromAllChannels(); - void UnSubscribeFromChannels(params string[] channels); - void UnSubscribeFromChannelsMatching(params string[] patterns); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisTransaction.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisTransaction.cs deleted file mode 100644 index 8b4442cd..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/IRedisTransaction.cs +++ /dev/null @@ -1,52 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
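// A minimal pub/sub sketch for the IRedisSubscription interface above, assuming
// a Redis server on localhost:6379; "channel-1" is a placeholder channel name.
using (IRedisClient redisConsumer = new RedisClient("localhost", 6379))
using (IRedisSubscription subscription = redisConsumer.CreateSubscription())
{
    subscription.OnSubscribe = channel =>
        Console.WriteLine("Subscribed to " + channel);
    subscription.OnMessage = (channel, msg) =>
    {
        Console.WriteLine("Received '" + msg + "' on " + channel);
        subscription.UnSubscribeFromAllChannels();   // stop listening after the first message
    };
    // Blocks the calling thread until the channels are unsubscribed.
    subscription.SubscribeToChannels("channel-1");
}
// A separate client publishes with: redisPublisher.PublishMessage("channel-1", "hello");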
-// - -using System; -using System.Collections.Generic; - -namespace ServiceStack.Redis -{ - public interface IRedisTransaction - : IDisposable - { - void QueueCommand(Action command); - void QueueCommand(Action command, Action onSuccessCallback); - void QueueCommand(Action command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func command); - void QueueCommand(Func command, Action onSuccessCallback); - void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func command); - void QueueCommand(Func command, Action onSuccessCallback); - void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func command); - void QueueCommand(Func command, Action onSuccessCallback); - void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func command); - void QueueCommand(Func command, Action onSuccessCallback); - void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func command); - void QueueCommand(Func command, Action onSuccessCallback); - void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback); - - void QueueCommand(Func> command); - void QueueCommand(Func> command, Action> onSuccessCallback); - void QueueCommand(Func> command, Action> onSuccessCallback, Action onErrorCallback); - - void Commit(); - void Rollback(); - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisMessageProducer.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisMessageProducer.cs deleted file mode 100644 index c4bdca70..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisMessageProducer.cs +++ /dev/null @@ -1,67 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
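// A minimal publishing sketch for the RedisMessageProducer that follows, assuming
// the default local Redis host; the Hello DTO is a placeholder message type.
public class Hello { public string Name { get; set; } }

IRedisClientsManager clientsManager = new BasicRedisClientManager();
using (IMessageProducer producer = new RedisMessageProducer(clientsManager, onPublishedCallback: null))
{
    // Publish<T> wraps the body in a Message<T>, serializes it, and LPUSHes the
    // bytes onto that message type's "in" queue.
    producer.Publish(new Hello { Name = "World" });
}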
-// - -using System; -using ServiceStack.Messaging; - -namespace ServiceStack.Redis.Messaging -{ - public class RedisMessageProducer - : IMessageProducer - { - private readonly IRedisClientsManager clientsManager; - private readonly Action onPublishedCallback; - - public RedisMessageProducer(IRedisClientsManager clientsManager, Action onPublishedCallback) - { - this.clientsManager = clientsManager; - this.onPublishedCallback = onPublishedCallback; - } - - private IRedisNativeClient readWriteClient; - public IRedisNativeClient ReadWriteClient - { - get - { - if (this.readWriteClient == null) - { - this.readWriteClient = (IRedisNativeClient)clientsManager.GetClient(); - } - return readWriteClient; - } - } - - public void Publish(T messageBody) - { - Publish((IMessage)new Message(messageBody)); - } - - public void Publish(IMessage message) - { - var messageBytes = message.ToBytes(); - this.ReadWriteClient.LPush(message.ToInQueueName(), messageBytes); - - if (onPublishedCallback != null) - { - onPublishedCallback(); - } - } - - public void Dispose() - { - if (readWriteClient != null) - { - readWriteClient.Dispose(); - } - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisMessageQueueClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisMessageQueueClient.cs deleted file mode 100644 index fcd4742a..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisMessageQueueClient.cs +++ /dev/null @@ -1,108 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using ServiceStack.Messaging; - -namespace ServiceStack.Redis.Messaging -{ - public class RedisMessageQueueClient - : IMessageQueueClient - { - private readonly Action onPublishedCallback; - private readonly IRedisClientsManager clientsManager; - - public RedisMessageQueueClient( - IRedisClientsManager clientsManager, Action onPublishedCallback) - { - this.onPublishedCallback = onPublishedCallback; - this.clientsManager = clientsManager; - } - - private IRedisNativeClient readWriteClient; - public IRedisNativeClient ReadWriteClient - { - get - { - if (this.readWriteClient == null) - { - this.readWriteClient = (IRedisNativeClient)clientsManager.GetClient(); - } - return readWriteClient; - } - } - - private IRedisNativeClient readOnlyClient; - public IRedisNativeClient ReadOnlyClient - { - get - { - if (this.readOnlyClient == null) - { - this.readOnlyClient = (IRedisNativeClient)clientsManager.GetReadOnlyClient(); - } - return readOnlyClient; - } - } - - public void Publish(T messageBody) - { - Publish(new Message(messageBody)); - } - - public void Publish(IMessage message) - { - var messageBytes = message.ToBytes(); - Publish(message.ToInQueueName(), messageBytes); - } - - public void Publish(string queueName, byte[] messageBytes) - { - this.ReadWriteClient.LPush(queueName, messageBytes); - - if (onPublishedCallback != null) - { - onPublishedCallback(); - } - } - - public void Notify(string queueName, byte[] messageBytes) - { - const int maxSuccessQueueSize = 1000; - this.ReadWriteClient.LPush(queueName, messageBytes); - this.ReadWriteClient.LTrim(queueName, 0, maxSuccessQueueSize); - } - - public byte[] Get(string queueName, TimeSpan? 
timeOut) - { - var unblockingKeyAndValue = this.ReadOnlyClient.BRPop(queueName, (int) timeOut.GetValueOrDefault().TotalSeconds); - return unblockingKeyAndValue[1]; - } - - public byte[] GetAsync(string queueName) - { - return this.ReadOnlyClient.RPop(queueName); - } - - public void Dispose() - { - if (this.readOnlyClient != null) - { - this.readOnlyClient.Dispose(); - } - if (this.readWriteClient != null) - { - this.readWriteClient.Dispose(); - } - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisMessageQueueClientFactory.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisMessageQueueClientFactory.cs deleted file mode 100644 index 138e866b..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisMessageQueueClientFactory.cs +++ /dev/null @@ -1,41 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using ServiceStack.Messaging; - -namespace ServiceStack.Redis.Messaging -{ - public class RedisMessageQueueClientFactory - : IMessageQueueClientFactory - { - private readonly Action onPublishedCallback; - private readonly IRedisClientsManager clientsManager; - - public RedisMessageQueueClientFactory( - IRedisClientsManager clientsManager, Action onPublishedCallback) - { - this.onPublishedCallback = onPublishedCallback; - this.clientsManager = clientsManager; - } - - public IMessageQueueClient CreateMessageQueueClient() - { - return new RedisMessageQueueClient( - this.clientsManager, this.onPublishedCallback); - } - - public void Dispose() - { - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisTransientMessageFactory.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisTransientMessageFactory.cs deleted file mode 100644 index 7e539698..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisTransientMessageFactory.cs +++ /dev/null @@ -1,81 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using ServiceStack.Messaging; - -namespace ServiceStack.Redis.Messaging -{ - /// - /// Transient message queues are a one-pass message queue service that starts - /// processing messages when Start() is called. Any subsequent Start() calls - /// while the service is running is ignored. - /// - /// The transient service will continue to run until all messages have been - /// processed after which time it will shutdown all processing until Start() is called again. - /// - public class RedisTransientMessageFactory - : IMessageFactory - { - public IRedisClientsManager ClientsManager { get; private set; } - - public RedisTransientMessageService MessageService { get; private set; } - - public RedisTransientMessageFactory( - IRedisClientsManager clientsManager) - : this(2, null, clientsManager) - { - } - - public RedisTransientMessageFactory(int retryAttempts, TimeSpan? 
requestTimeOut, - IRedisClientsManager clientsManager) - { - this.ClientsManager = clientsManager ?? new BasicRedisClientManager(); - MessageService = new RedisTransientMessageService( - retryAttempts, requestTimeOut, this); - } - - public IMessageProducer CreateMessageProducer() - { - return new RedisMessageProducer(this.ClientsManager, OnMessagePublished); - } - - public IMessageService CreateMessageService() - { - return MessageService; - } - - - public void OnMessagePublished() - { - if (this.MessageService != null) - { - this.MessageService.Start(); - } - } - - public void Dispose() - { - if (this.MessageService != null) - { - this.MessageService.Dispose(); - this.MessageService = null; - } - - if (this.ClientsManager != null) - { - this.ClientsManager.Dispose(); - this.ClientsManager = null; - } - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisTransientMessageService.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisTransientMessageService.cs deleted file mode 100644 index 1d7cb303..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Messaging/RedisTransientMessageService.cs +++ /dev/null @@ -1,40 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using ServiceStack.Common.Extensions; -using ServiceStack.Messaging; - -namespace ServiceStack.Redis.Messaging -{ - public class RedisTransientMessageService - : TransientMessageServiceBase - { - private readonly RedisMessageQueueClientFactory factory; - - public RedisTransientMessageService(int retryAttempts, TimeSpan? 
requestTimeOut, - RedisTransientMessageFactory messageFactory) - : base(retryAttempts, requestTimeOut) - { - messageFactory.ThrowIfNull("messageFactory"); - - this.factory = new RedisMessageQueueClientFactory( - messageFactory.ClientsManager, messageFactory.OnMessagePublished); - } - - public override IMessageQueueClientFactory MessageFactory - { - get { return this.factory; } - } - } - -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Pipelining/RedisNativeClient_Pipline.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Pipelining/RedisNativeClient_Pipline.cs deleted file mode 100644 index a0d498b5..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Pipelining/RedisNativeClient_Pipline.cs +++ /dev/null @@ -1,155 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public partial class RedisNativeClient - { - public abstract class PipelineCommand - { - private byte[][] cmdWithBinaryArgs_; - protected readonly RedisNativeClient client_; - - public PipelineCommand() - { - } - public PipelineCommand(RedisNativeClient client) - { - client_ = client; - } - public void init(params byte[][] cmdWithBinaryArgs) - { - cmdWithBinaryArgs_ = cmdWithBinaryArgs; - } - public void execute() - { - if (cmdWithBinaryArgs_ == null) - { - var throwEx = new Exception(string.Format("Attempt to execute uninitialized pipleine command")); - log.Error(throwEx.Message); - throw throwEx; - } - if (!client_.SendCommand(cmdWithBinaryArgs_)) - throw client_.CreateConnectionError(); - } - public abstract void expect(); - } - public class ExpectCodeCommand : PipelineCommand - { - public string Code { get; set; } - public ExpectCodeCommand(RedisNativeClient client) : base(client) - { - } - public override void expect() - { - Code = client_.ExpectCode(); - } - } - public class ExpectSuccessCommand : PipelineCommand - { - public ExpectSuccessCommand(RedisNativeClient client) : base(client) - { - } - public override void expect() - { - client_.ExpectSuccess(); - } - } - public class ExpectIntCommand : PipelineCommand - { - private int expectedInt; - public ExpectIntCommand(RedisNativeClient client) : base(client) - { - } - public override void expect() - { - expectedInt = client_.ReadInt(); - } - public int getInt() - { - return expectedInt; - } - } - public class ExpectDoubleCommand : PipelineCommand - { - private double expectedDouble; - public ExpectDoubleCommand(RedisNativeClient client) : base(client) - { - } - public override void expect() - { - expectedDouble = client_.parseDouble(client_.ReadData()); - } - public double getDouble() - { - return expectedDouble; - } - } - public class ExpectStringCommand : PipelineCommand - { - private string expectedString; - public ExpectStringCommand(RedisNativeClient client) : base(client) - { - } - public override void expect() - { - var bytes = client_.ReadData(); - expectedString = bytes.FromUtf8Bytes(); - } - public string getString() - { - return expectedString; - } - } - public class ExpectWordCommand : PipelineCommand - { - private readonly string word_; - public ExpectWordCommand(RedisNativeClient client) : base(client) - { - } - public ExpectWordCommand(RedisNativeClient client, string word) - : base(client) - { - word_ = word; - } - public override void expect() - { - client_.ExpectWord(word_); - } - } - public class ExpectDataCommand : PipelineCommand - { - private byte[] data; - public 
ExpectDataCommand(RedisNativeClient client) : base(client) - { - } - public override void expect() - { - data = client_.ReadData(); - } - public byte[] getData() - { - return data; - } - - } - public class ExpectMultiDataCommand : PipelineCommand - { - private byte[][] multiData; - public ExpectMultiDataCommand(RedisNativeClient client) : base(client) - { - } - public override void expect() - { - multiData = client_.ReadMultiData(); - } - public byte[][] getMultiData() - { - return multiData; - } - } - } -} diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Pipelining/RedisPipeline.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Pipelining/RedisPipeline.cs deleted file mode 100644 index 540a7a8f..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Pipelining/RedisPipeline.cs +++ /dev/null @@ -1,48 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; - -namespace ServiceStack.Redis -{ - public class RedisPipelineCommand - { - private readonly RedisNativeClient client; - private Queue commands = new Queue(); - - public RedisPipelineCommand(RedisNativeClient client) - { - this.client = client; - } - - public void WriteCommand(params byte[][] cmdWithBinaryArgs) - { - RedisNativeClient.ExpectIntCommand cmd = new RedisNativeClient.ExpectIntCommand(client); - cmd.init(cmdWithBinaryArgs); - cmd.execute(); - commands.Enqueue(cmd); - } - - public List ReadAllAsInts() - { - var results = new List(); - if (commands.Count() == 0) - return results; - while (commands.Count() > 0) - { - results.Add(commands.Dequeue().getInt()); - } - return results; - } - - public bool ReadAllAsIntsHaveSuccess() - { - var allResults = ReadAllAsInts(); - return allResults.All(x => x == RedisNativeClient.Success); - } - - public void Flush() - { - client.FlushSendBuffer(); - } - } -} diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/PooledRedisClientManager.ICacheClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/PooledRedisClientManager.ICacheClient.cs deleted file mode 100644 index 685596fc..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/PooledRedisClientManager.ICacheClient.cs +++ /dev/null @@ -1,197 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using ServiceStack.CacheAccess; - -namespace ServiceStack.Redis -{ - /// - /// For more interoperabilty I'm also implementing the ICacheClient on - /// this cache client manager which has the affect of calling - /// GetCacheClient() for all write operations and GetReadOnlyCacheClient() - /// for the read ones. - /// - /// This works well for master-slave replication scenarios where you have - /// 1 master that replicates to multiple read slaves. 
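// A minimal sketch of the read/write split provided by the ICacheClient facade
// above, assuming one master and two read slaves; host names are placeholders.
var pool = new PooledRedisClientManager(
    readWriteHosts: new[] { "master:6379" },
    readOnlyHosts: new[] { "slave1:6379", "slave2:6379" });

// Writes are served by a pooled client for the master host...
pool.Set("greeting", "hello");

// ...while reads come from the read-only (slave) pool. Both default to
// DefaultCacheDb (DB 9) unless a database was configured explicitly.
var greeting = pool.Get<string>("greeting");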
- /// - public partial class PooledRedisClientManager - : IRedisClientCacheManager, ICacheClient - { - public const int DefaultCacheDb = 9; - - public ICacheClient GetCacheClient() - { - return ConfigureRedisClient(this.GetClient()); - } - - public ICacheClient GetReadOnlyCacheClient() - { - return ConfigureRedisClient(this.GetReadOnlyClient()); - } - - private ICacheClient ConfigureRedisClient(IRedisClient client) - { - //Provide automatic partitioning of 'Redis Caches' from normal persisted data - //which is on DB '0' by default. - - var notUserSpecified = this.Db == RedisNativeClient.DefaultDb; - if (notUserSpecified) - { - client.Db = DefaultCacheDb; - } - return client; - } - - - #region Implementation of ICacheClient - - public bool Remove(string key) - { - using (var client = GetReadOnlyCacheClient()) - { - return client.Remove(key); - } - } - - public void RemoveAll(IEnumerable keys) - { - using (var client = GetCacheClient()) - { - client.RemoveAll(keys); - } - } - - public T Get(string key) - { - using (var client = GetReadOnlyCacheClient()) - { - return client.Get(key); - } - } - - public long Increment(string key, uint amount) - { - using (var client = GetCacheClient()) - { - return client.Increment(key, amount); - } - } - - public long Decrement(string key, uint amount) - { - using (var client = GetCacheClient()) - { - return client.Decrement(key, amount); - } - } - - public bool Add(string key, T value) - { - using (var client = GetCacheClient()) - { - return client.Add(key, value); - } - } - - public bool Set(string key, T value) - { - using (var client = GetCacheClient()) - { - return client.Set(key, value); - } - } - - public bool Replace(string key, T value) - { - using (var client = GetCacheClient()) - { - return client.Replace(key, value); - } - } - - public bool Add(string key, T value, DateTime expiresAt) - { - using (var client = GetCacheClient()) - { - return client.Add(key, value, expiresAt); - } - } - - public bool Set(string key, T value, DateTime expiresAt) - { - using (var client = GetCacheClient()) - { - return client.Set(key, value, expiresAt); - } - } - - public bool Replace(string key, T value, DateTime expiresAt) - { - using (var client = GetCacheClient()) - { - return client.Replace(key, value, expiresAt); - } - } - - public bool Add(string key, T value, TimeSpan expiresIn) - { - using (var client = GetCacheClient()) - { - return client.Set(key, value, expiresIn); - } - } - - public bool Set(string key, T value, TimeSpan expiresIn) - { - using (var client = GetCacheClient()) - { - return client.Set(key, value, expiresIn); - } - } - - public bool Replace(string key, T value, TimeSpan expiresIn) - { - using (var client = GetCacheClient()) - { - return client.Replace(key, value, expiresIn); - } - } - - public void FlushAll() - { - using (var client = GetCacheClient()) - { - client.FlushAll(); - } - } - - public IDictionary GetAll(IEnumerable keys) - { - using (var client = GetReadOnlyCacheClient()) - { - return client.GetAll(keys); - } - } - - public void SetAll(IDictionary values) - { - foreach (var entry in values) - { - Set(entry.Key, entry.Value); - } - } - #endregion - } - -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/PooledRedisClientManager.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/PooledRedisClientManager.cs deleted file mode 100644 index 157834f3..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/PooledRedisClientManager.cs +++ /dev/null @@ -1,363 +0,0 @@ -// -// 
https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using ServiceStack.Common.Web; -using ServiceStack.Logging; - -namespace ServiceStack.Redis -{ - /// - /// Provides thread-safe pooling of redis client connections. - /// Allows load-balancing of master-write and read-slave hosts, ideal for - /// 1 master and multiple replicated read slaves. - /// - public partial class PooledRedisClientManager - : IRedisClientsManager - { - private static readonly ILog Log = LogManager.GetLogger(typeof(PooledRedisClientManager)); - - protected const int PoolSizeMultiplier = 10; - - private List ReadWriteHosts { get; set; } - private List ReadOnlyHosts { get; set; } - - private RedisClient[] writeClients = new RedisClient[0]; - protected int WritePoolIndex; - - private RedisClient[] readClients = new RedisClient[0]; - protected int ReadPoolIndex; - - protected int RedisClientCounter = 0; - - protected RedisClientManagerConfig Config { get; set; } - - public IRedisClientFactory RedisClientFactory { get; set; } - - public int Db { get; private set; } - - public PooledRedisClientManager() : this(RedisNativeClient.DefaultHost) { } - - public PooledRedisClientManager(params string[] readWriteHosts) - : this(readWriteHosts, readWriteHosts) - { - } - - public PooledRedisClientManager(IEnumerable readWriteHosts, IEnumerable readOnlyHosts) - : this(readWriteHosts, readOnlyHosts, null) - { - } - - /// - /// Hosts can be an IP Address or Hostname in the format: host[:port] - /// e.g. 127.0.0.1:6379 - /// default is: localhost:6379 - /// - /// The write hosts. - /// The read hosts. - /// The config. - public PooledRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts, - RedisClientManagerConfig config) - : this(readWriteHosts, readOnlyHosts, config, RedisNativeClient.DefaultDb) - { - } - - public PooledRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts, - int initalDb) - : this(readWriteHosts, readOnlyHosts, null, initalDb) - { - } - - public PooledRedisClientManager( - IEnumerable readWriteHosts, - IEnumerable readOnlyHosts, - RedisClientManagerConfig config, - int initalDb) - { - this.Db = config != null - ? config.DefaultDb.GetValueOrDefault(initalDb) - : initalDb; - - ReadWriteHosts = readWriteHosts.ToIpEndPoints(); - ReadOnlyHosts = readOnlyHosts.ToIpEndPoints(); - - this.RedisClientFactory = Redis.RedisClientFactory.Instance; - - this.Config = config ?? 
new RedisClientManagerConfig { - MaxWritePoolSize = ReadWriteHosts.Count * PoolSizeMultiplier, - MaxReadPoolSize = ReadOnlyHosts.Count * PoolSizeMultiplier, - }; - - if (this.Config.AutoStart) - { - this.OnStart(); - } - } - - protected virtual void OnStart() - { - this.Start(); - } - - /// - /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - /// - /// - public IRedisClient GetClient() - { - lock (writeClients) - { - AssertValidReadWritePool(); - - RedisClient inActiveClient; - while ((inActiveClient = GetInActiveWriteClient()) == null) - { - Monitor.Wait(writeClients); - } - - WritePoolIndex++; - inActiveClient.Active = true; - - //Reset database to default if changed - if (inActiveClient.Db != Db) - { - inActiveClient.Db = Db; - } - - return inActiveClient; - } - } - - /// - /// Called within a lock - /// - /// - private RedisClient GetInActiveWriteClient() - { - for (var i=0; i < writeClients.Length; i++) - { - var nextIndex = (WritePoolIndex + i) % writeClients.Length; - - //Initialize if not exists - var existingClient = writeClients[nextIndex]; - if (existingClient == null - || existingClient.HadExceptions) - { - if (existingClient != null) - { - existingClient.DisposeConnection(); - } - - var nextHost = ReadWriteHosts[nextIndex % ReadWriteHosts.Count]; - - var client = RedisClientFactory.CreateRedisClient( - nextHost.Host, nextHost.Port); - - client.Id = RedisClientCounter++; - client.ClientManager = this; - - writeClients[nextIndex] = client; - - return client; - } - - //look for free one - if (!writeClients[nextIndex].Active) - { - return writeClients[nextIndex]; - } - } - return null; - } - - /// - /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - /// - /// - public virtual IRedisClient GetReadOnlyClient() - { - lock (readClients) - { - AssertValidReadOnlyPool(); - - RedisClient inActiveClient; - while ((inActiveClient = GetInActiveReadClient()) == null) - { - Monitor.Wait(readClients); - } - - ReadPoolIndex++; - inActiveClient.Active = true; - - //Reset database to default if changed - if (inActiveClient.Db != Db) - { - inActiveClient.Db = Db; - } - - return inActiveClient; - } - } - - /// - /// Called within a lock - /// - /// - private RedisClient GetInActiveReadClient() - { - for (var i=0; i < readClients.Length; i++) - { - var nextIndex = (ReadPoolIndex + i) % readClients.Length; - - //Initialize if not exists - var existingClient = readClients[nextIndex]; - if (existingClient == null - || existingClient.HadExceptions) - { - if (existingClient != null) - { - existingClient.DisposeConnection(); - } - - var nextHost = ReadOnlyHosts[nextIndex % ReadOnlyHosts.Count]; - var client = RedisClientFactory.CreateRedisClient( - nextHost.Host, nextHost.Port); - - client.ClientManager = this; - - readClients[nextIndex] = client; - - return client; - } - - //look for free one - if (!readClients[nextIndex].Active) - { - return readClients[nextIndex]; - } - } - return null; - } - - public void DisposeClient(RedisNativeClient client) - { - lock (readClients) - { - for (var i = 0; i < readClients.Length; i++) - { - var readClient = readClients[i]; - if (client != readClient) continue; - client.Active = false; - Monitor.PulseAll(readClients); - return; - } - } - - lock (writeClients) - { - for (var i = 0; i < writeClients.Length; i++) - { - var writeClient = writeClients[i]; - if (client != writeClient) continue; - client.Active = false; - Monitor.PulseAll(writeClients); - return; - } - } - - //Console.WriteLine("Couldn't find {0} 
client with Id: {1}, readclients: {2}, writeclients: {3}", - // client.IsDisposed ? "Disposed" : "Undisposed", - // client.Id, - // string.Join(", ", readClients.ToList().ConvertAll(x => x != null ? x.Id.ToString() : "").ToArray()), - // string.Join(", ", writeClients.ToList().ConvertAll(x => x != null ? x.Id.ToString() : "").ToArray())); - - if (client.IsDisposed) return; - - throw new NotSupportedException("Cannot add unknown client back to the pool"); - } - - public void Start() - { - if (writeClients.Length > 0 || readClients.Length > 0) - throw new InvalidOperationException("Pool has already been started"); - - writeClients = new RedisClient[Config.MaxWritePoolSize]; - WritePoolIndex = 0; - - readClients = new RedisClient[Config.MaxReadPoolSize]; - ReadPoolIndex = 0; - } - - private void AssertValidReadWritePool() - { - if (writeClients.Length < 1) - throw new InvalidOperationException("Need a minimum read-write pool size of 1, then call Start()"); - } - - private void AssertValidReadOnlyPool() - { - if (readClients.Length < 1) - throw new InvalidOperationException("Need a minimum read pool size of 1, then call Start()"); - } - - ~PooledRedisClientManager() - { - Dispose(false); - } - - public void Dispose() - { - Dispose(true); - GC.SuppressFinalize(this); - } - - protected virtual void Dispose(bool disposing) - { - if (disposing) - { - // get rid of managed resources - } - - // get rid of unmanaged resources - for (var i = 0; i < writeClients.Length; i++) - { - Dispose(writeClients[i]); - } - for (var i = 0; i < readClients.Length; i++) - { - Dispose(readClients[i]); - } - } - - protected void Dispose(RedisClient redisClient) - { - if (redisClient == null) return; - try - { - redisClient.DisposeConnection(); - } - catch (Exception ex) - { - Log.Error(string.Format( - "Error when trying to dispose of RedisClient to host {0}:{1}", - redisClient.Host, redisClient.Port), ex); - } - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/QueuedRedisOperation.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/QueuedRedisOperation.cs deleted file mode 100644 index 0846737c..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/QueuedRedisOperation.cs +++ /dev/null @@ -1,141 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; -using ServiceStack.Logging; - -namespace ServiceStack.Redis -{ - internal class QueuedRedisOperation - { - private static readonly ILog Log = LogManager.GetLogger(typeof(QueuedRedisOperation)); - - public Action VoidReadCommand { get; set; } - public Func IntReadCommand { get; set; } - public Func BoolReadCommand { get; set; } - public Func BytesReadCommand { get; set; } - public Func MultiBytesReadCommand { get; set; } - public Func StringReadCommand { get; set; } - public Func> MultiStringReadCommand { get; set; } - public Func DoubleReadCommand { get; set; } - - public Action OnSuccessVoidCallback { get; set; } - public Action OnSuccessIntCallback { get; set; } - public Action OnSuccessBoolCallback { get; set; } - public Action OnSuccessBytesCallback { get; set; } - public Action OnSuccessMultiBytesCallback { get; set; } - public Action OnSuccessStringCallback { get; set; } - public Action> OnSuccessMultiStringCallback { get; set; } - public Action OnSuccessDoubleCallback { get; set; } - - public Action OnSuccessTypeCallback { get; set; } - public Action> OnSuccessMultiTypeCallback { get; set; } - - public Action OnErrorCallback { get; set; } - - public void ProcessResult() - { - 
try - { - if (VoidReadCommand != null) - { - VoidReadCommand(); - if (OnSuccessVoidCallback != null) - { - OnSuccessVoidCallback(); - } - } - else if (IntReadCommand != null) - { - var result = IntReadCommand(); - if (OnSuccessIntCallback != null) - { - OnSuccessIntCallback(result); - } - if (OnSuccessBoolCallback != null) - { - var success = result == RedisNativeClient.Success; - OnSuccessBoolCallback(success); - } - if (OnSuccessVoidCallback != null) - { - OnSuccessVoidCallback(); - } - } - else if (DoubleReadCommand != null) - { - var result = DoubleReadCommand(); - if (OnSuccessDoubleCallback != null) - { - OnSuccessDoubleCallback(result); - } - } - else if (BytesReadCommand != null) - { - var result = BytesReadCommand(); - if (OnSuccessBytesCallback != null) - { - OnSuccessBytesCallback(result); - } - if (OnSuccessStringCallback != null) - { - OnSuccessStringCallback(Encoding.UTF8.GetString(result)); - } - if (OnSuccessTypeCallback != null) - { - OnSuccessTypeCallback(Encoding.UTF8.GetString(result)); - } - if (OnSuccessIntCallback != null) - { - OnSuccessIntCallback(int.Parse(Encoding.UTF8.GetString(result))); - } - } - else if (StringReadCommand != null) - { - var result = StringReadCommand(); - if (OnSuccessStringCallback != null) - { - OnSuccessStringCallback(result); - } - if (OnSuccessTypeCallback != null) - { - OnSuccessTypeCallback(result); - } - } - else if (MultiBytesReadCommand != null) - { - var result = MultiBytesReadCommand(); - if (OnSuccessBytesCallback != null) - { - OnSuccessMultiBytesCallback(result); - } - if (OnSuccessMultiStringCallback != null) - { - OnSuccessMultiStringCallback(result.ToStringList()); - } - if (OnSuccessMultiTypeCallback != null) - { - OnSuccessMultiTypeCallback(result.ToStringList()); - } - } - else if (MultiStringReadCommand != null) - { - var result = MultiStringReadCommand(); - if (OnSuccessMultiStringCallback != null) - { - OnSuccessMultiStringCallback(result); - } - } - } - catch (Exception ex) - { - Log.Error(ex); - - if (OnErrorCallback != null) - { - OnErrorCallback(ex); - } - } - } - - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisCacheClientFactory.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisCacheClientFactory.cs deleted file mode 100644 index e4f34ce6..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisCacheClientFactory.cs +++ /dev/null @@ -1,30 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
-// - -using System.Net; - -namespace ServiceStack.Redis -{ - /// - /// Provide the factory implementation for creating a RedisCacheClient that - /// can be mocked and used by different 'Redis Client Managers' - /// - public class RedisCacheClientFactory : IRedisClientFactory - { - public static RedisCacheClientFactory Instance = new RedisCacheClientFactory(); - - public RedisClient CreateRedisClient(string host, int port) - { - return new RedisClient(host, port); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient.ICacheClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient.ICacheClient.cs deleted file mode 100644 index 16ab4b25..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient.ICacheClient.cs +++ /dev/null @@ -1,191 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using ServiceStack.CacheAccess; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public partial class RedisClient - : ICacheClient - { - public void RemoveAll(IEnumerable keys) - { - RemoveEntry(keys.ToArray()); - } - - public T Get(string key) - { - return typeof(T) == typeof(byte[]) - ? (T)(object)base.Get(key) - : JsonSerializer.DeserializeFromString(GetValue(key)); - } - - public long Increment(string key, uint amount) - { - return IncrementValueBy(key, (int)amount); - } - - public long Decrement(string key, uint amount) - { - return DecrementValueBy(key, (int)amount); - } - - public bool Add(string key, T value) - { - var bytesValue = value as byte[]; - if (bytesValue != null) - { - return base.SetNX(key, bytesValue) == Success; - } - - var valueString = JsonSerializer.SerializeToString(value); - return SetEntryIfNotExists(key, valueString); - } - - public bool Set(string key, T value) - { - var bytesValue = value as byte[]; - if (bytesValue != null) - { - base.Set(key, bytesValue); - return true; - } - - var valueString = JsonSerializer.SerializeToString(value); - SetEntry(key, valueString); - return true; - } - - public bool Replace(string key, T value) - { - var exists = ContainsKey(key); - if (!exists) return false; - - var bytesValue = value as byte[]; - if (bytesValue != null) - { - base.Set(key, bytesValue); - return true; - } - - SetEntry(key, JsonSerializer.SerializeToString(value)); - return true; - } - - public bool Add(string key, T value, DateTime expiresAt) - { - if (Add(key, value)) - { - ExpireEntryAt(key, expiresAt); - return true; - } - return false; - } - - public bool Set(string key, T value, TimeSpan expiresIn) - { - var bytesValue = value as byte[]; - if (bytesValue != null) - { - base.SetEx(key, (int)expiresIn.TotalSeconds, bytesValue); - return true; - } - - var valueString = JsonSerializer.SerializeToString(value); - SetEntry(key, valueString, expiresIn); - return true; - } - - public bool Set(string key, T value, DateTime expiresAt) - { - Set(key, value); - ExpireEntryAt(key, expiresAt); - return true; - } - - public bool Replace(string key, T value, DateTime expiresAt) - { - if (Replace(key, value)) - { - ExpireEntryAt(key, expiresAt); - return true; - } - return false; - } - - public bool Add(string 
key, T value, TimeSpan expiresIn) - { - if (Add(key, value)) - { - ExpireEntryIn(key, expiresIn); - return true; - } - return false; - } - - public bool Replace(string key, T value, TimeSpan expiresIn) - { - if (Replace(key, value)) - { - ExpireEntryIn(key, expiresIn); - return true; - } - return false; - } - - public IDictionary GetAll(IEnumerable keys) - { - var keysArray = keys.ToArray(); - var keyValues = MGet(keysArray); - var results = new Dictionary(); - var isBytes = typeof(T) == typeof(byte[]); - - var i = 0; - foreach (var keyValue in keyValues) - { - var key = keysArray[i++]; - - if (keyValue == null) - { - results[key] = default(T); - continue; - } - - if (isBytes) - { - results[key] = (T)(object)keyValue; - } - else - { - var keyValueString = Encoding.UTF8.GetString(keyValue); - results[key] = JsonSerializer.DeserializeFromString(keyValueString); - } - } - return results; - } - - public void SetAll(IDictionary values) - { - foreach (var entry in values) - { - Set(entry.Key, entry.Value); - } - } - } - - -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient.cs deleted file mode 100644 index 64ccfcbf..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient.cs +++ /dev/null @@ -1,547 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using ServiceStack.Common.Extensions; -using ServiceStack.Common.Utils; -using ServiceStack.Redis.Generic; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - /// - /// The client wraps the native redis operations into a more readable c# API. - /// - /// Where possible these operations are also exposed in common c# interfaces, - /// e.g. RedisClient.Lists => IList[string] - /// RedisClient.Sets => ICollection[string] - /// - public partial class RedisClient - : RedisNativeClient, IRedisClient - { - public RedisClient() - { - Init(); - } - - public RedisClient(string host) - : base(host) - { - Init(); - } - - public RedisClient(string host, int port) - : base(host, port) - { - Init(); - } - - public void Init() - { - this.Lists = new RedisClientLists(this); - this.Sets = new RedisClientSets(this); - this.SortedSets = new RedisClientSortedSets(this); - this.Hashes = new RedisClientHashes(this); - } - - public string this[string key] - { - get { return GetValue(key); } - set { SetEntry(key, value); } - } - - public string GetTypeSequenceKey() - { - return "seq:" + typeof(T).Name; - } - - public string GetTypeIdsSetKey() - { - return "ids:" + typeof(T).Name; - } - - public void RewriteAppendOnlyFileAsync() - { - base.BgRewriteAof(); - } - - public List GetAllKeys() - { - return SearchKeys("*"); - } - - public void SetEntry(string key, string value) - { - var bytesValue = value != null - ? value.ToUtf8Bytes() - : null; - - Set(key, bytesValue); - } - - public void SetEntry(string key, string value, TimeSpan expireIn) - { - var bytesValue = value != null - ? 
value.ToUtf8Bytes() - : null; - - SetEx(key, (int)expireIn.TotalSeconds, bytesValue); - } - - public bool SetEntryIfNotExists(string key, string value) - { - if (value == null) - throw new ArgumentNullException("value"); - - return SetNX(key, value.ToUtf8Bytes()) == Success; - } - - public string GetValue(string key) - { - var bytes = Get(key); - return bytes == null - ? null - : bytes.FromUtf8Bytes(); - } - - public string GetAndSetEntry(string key, string value) - { - return GetSet(key, value.ToUtf8Bytes()).FromUtf8Bytes(); - } - - public bool ContainsKey(string key) - { - return Exists(key) == Success; - } - - public bool Remove(string key) - { - return Del(key) == Success; - } - - public bool RemoveEntry(params string[] keys) - { - if (keys.Length == 0) return false; - - return Del(keys) == Success; - } - - public int IncrementValue(string key) - { - return Incr(key); - } - - public int IncrementValueBy(string key, int count) - { - return IncrBy(key, count); - } - - public int DecrementValue(string key) - { - return Decr(key); - } - - public int DecrementValueBy(string key, int count) - { - return DecrBy(key, count); - } - - public int AppendToValue(string key, string value) - { - return base.Append(key, value.ToUtf8Bytes()); - } - - public string GetSubstring(string key, int fromIndex, int toIndex) - { - return base.Substr(key, fromIndex, toIndex).FromUtf8Bytes(); - } - - public string GetRandomKey() - { - return RandomKey(); - } - - public bool ExpireEntryIn(string key, TimeSpan expireIn) - { - return Expire(key, (int)expireIn.TotalSeconds) == Success; - } - - public bool ExpireEntryAt(string key, DateTime expireAt) - { - return ExpireAt(key, expireAt.ToUnixTime()) == Success; - } - - public TimeSpan GetTimeToLive(string key) - { - return TimeSpan.FromSeconds(Ttl(key)); - } - - public IRedisTypedClient GetTypedClient() - { - return new RedisTypedClient(this); - } - - public IDisposable AcquireLock(string key) - { - return new RedisLock(this, key, null); - } - - public IDisposable AcquireLock(string key, TimeSpan timeOut) - { - return new RedisLock(this, key, timeOut); - } - - public IRedisTransaction CreateTransaction() - { - return new RedisTransaction(this); - } - - public List SearchKeys(string pattern) - { - var hasBug = IsPreVersion1_26; - if (hasBug) - { - var spaceDelimitedKeys = KeysV126(pattern).FromUtf8Bytes(); - return spaceDelimitedKeys.IsNullOrEmpty() - ? 
new List() - : new List(spaceDelimitedKeys.Split(' ')); - } - - var multiDataList = Keys(pattern); - return multiDataList.ToStringList(); - } - - public List GetValues(List keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - var resultBytesArray = MGet(keys.ToArray()); - - var results = new List(); - foreach (var resultBytes in resultBytesArray) - { - if (resultBytes == null) continue; - - var resultString = resultBytes.FromUtf8Bytes(); - results.Add(resultString); - } - - return results; - } - - public List GetValues(List keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - if (keys.Count == 0) return new List(); - - var resultBytesArray = MGet(keys.ToArray()); - - var results = new List(); - foreach (var resultBytes in resultBytesArray) - { - if (resultBytes == null) continue; - - var resultString = resultBytes.FromUtf8Bytes(); - var result = JsonSerializer.DeserializeFromString(resultString); - results.Add(result); - } - - return results; - } - - public Dictionary GetValuesMap(List keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - if (keys.Count == 0) return new Dictionary(); - - var keysArray = keys.ToArray(); - var resultBytesArray = MGet(keysArray); - - var results = new Dictionary(); - for (var i = 0; i < resultBytesArray.Length; i++) - { - var key = keysArray[i]; - - var resultBytes = resultBytesArray[i]; - if (resultBytes == null) - { - results.Add(key, null); - } - else - { - var resultString = resultBytes.FromUtf8Bytes(); - results.Add(key, resultString); - } - } - - return results; - } - - public Dictionary GetValuesMap(List keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - if (keys.Count == 0) return new Dictionary(); - - var keysArray = keys.ToArray(); - var resultBytesArray = MGet(keysArray); - - var results = new Dictionary(); - for (var i = 0; i < resultBytesArray.Length; i++) - { - var key = keysArray[i]; - - var resultBytes = resultBytesArray[i]; - if (resultBytes == null) - { - results.Add(key, default(T)); - } - else - { - var resultString = resultBytes.FromUtf8Bytes(); - var result = JsonSerializer.DeserializeFromString(resultString); - results.Add(key, result); - } - } - - return results; - } - - public IRedisSubscription CreateSubscription() - { - return new RedisSubscription(this); - } - - public int PublishMessage(string toChannel, string message) - { - return base.Publish(toChannel, message.ToUtf8Bytes()); - } - - #region IBasicPersistenceProvider - - - Dictionary> registeredTypeIdsWithinTransactionMap = new Dictionary>(); - - internal HashSet GetRegisteredTypeIdsWithinTransaction(string typeIdsSet) - { - HashSet registeredTypeIdsWithinTransaction; - if (!registeredTypeIdsWithinTransactionMap.TryGetValue(typeIdsSet, out registeredTypeIdsWithinTransaction)) - { - registeredTypeIdsWithinTransaction = new HashSet(); - registeredTypeIdsWithinTransactionMap[typeIdsSet] = registeredTypeIdsWithinTransaction; - } - return registeredTypeIdsWithinTransaction; - } - - internal void RegisterTypeId(T value) - { - var typeIdsSetKey = GetTypeIdsSetKey(); - var id = value.GetId().ToString(); - - if (this.CurrentTransaction != null) - { - var registeredTypeIdsWithinTransaction = GetRegisteredTypeIdsWithinTransaction(typeIdsSetKey); - registeredTypeIdsWithinTransaction.Add(id); - } - else - { - this.AddItemToSet(typeIdsSetKey, id); - } - } - - internal void RegisterTypeIds(IEnumerable values) - { - var typeIdsSetKey = GetTypeIdsSetKey(); - var ids = values.ConvertAll(x => x.GetId().ToString()); 
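// When called inside a transaction the ids are only staged locally in
// registeredTypeIdsWithinTransactionMap; they are presumably merged into the
// "ids:<T>" set by AddTypeIdsRegisteredDuringTransaction once the transaction
// completes. Outside a transaction they are added to the set immediately.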
- - if (this.CurrentTransaction != null) - { - var registeredTypeIdsWithinTransaction = GetRegisteredTypeIdsWithinTransaction(typeIdsSetKey); - ids.ForEach(x => registeredTypeIdsWithinTransaction.Add(x)); - } - else - { - AddRangeToSet(typeIdsSetKey, ids); - } - } - - internal void RemoveTypeIds(params string[] ids) - { - var typeIdsSetKey = GetTypeIdsSetKey(); - if (this.CurrentTransaction != null) - { - var registeredTypeIdsWithinTransaction = GetRegisteredTypeIdsWithinTransaction(typeIdsSetKey); - ids.ForEach(x => registeredTypeIdsWithinTransaction.Remove(x)); - } - else - { - ids.ForEach(x => this.RemoveItemFromSet(typeIdsSetKey, x)); - } - } - - internal void RemoveTypeIds(params T[] values) - { - var typeIdsSetKey = GetTypeIdsSetKey(); - if (this.CurrentTransaction != null) - { - var registeredTypeIdsWithinTransaction = GetRegisteredTypeIdsWithinTransaction(typeIdsSetKey); - values.ForEach(x => registeredTypeIdsWithinTransaction.Remove(x.GetId().ToString())); - } - else - { - values.ForEach(x => this.RemoveItemFromSet(typeIdsSetKey, x.GetId().ToString())); - } - } - - internal void AddTypeIdsRegisteredDuringTransaction() - { - foreach (var entry in registeredTypeIdsWithinTransactionMap) - { - var typeIdsSetKey = entry.Key; - foreach (var id in entry.Value) - { - var registeredTypeIdsWithinTransaction = GetRegisteredTypeIdsWithinTransaction(typeIdsSetKey); - registeredTypeIdsWithinTransaction.ForEach(x => this.AddItemToSet(typeIdsSetKey, id)); - } - } - registeredTypeIdsWithinTransactionMap = new Dictionary>(); - } - - internal void ClearTypeIdsRegisteredDuringTransaction() - { - registeredTypeIdsWithinTransactionMap = new Dictionary>(); - } - - - public T GetById(object id) where T : class, new() - { - var key = IdUtils.CreateUrn(id); - var valueString = this.GetValue(key); - var value = JsonSerializer.DeserializeFromString(valueString); - return value; - } - - public IList GetByIds(ICollection ids) - where T : class, new() - { - if (ids == null || ids.Count == 0) - return new List(); - - var urnKeys = ids.ConvertAll(x => IdUtils.CreateUrn(x)); - return GetValues(urnKeys); - } - - public IList GetAll() - where T : class, new() - { - var typeIdsSetKy = this.GetTypeIdsSetKey(); - var allTypeIds = this.GetAllItemsFromSet(typeIdsSetKy); - var urnKeys = allTypeIds.ConvertAll(x => IdUtils.CreateUrn(x)); - return GetValues(urnKeys); - } - - public T Store(T entity) - where T : class, new() - { - var urnKey = entity.CreateUrn(); - var valueString = JsonSerializer.SerializeToString(entity); - - this.SetEntry(urnKey, valueString); - RegisterTypeId(entity); - - return entity; - } - - public void StoreAll(IEnumerable entities) - where TEntity : class, new() - { - if (entities == null) return; - - var entitiesList = entities.ToList(); - var len = entitiesList.Count; - - var keys = new byte[len][]; - var values = new byte[len][]; - - for (var i = 0; i < len; i++) - { - keys[i] = entitiesList[i].CreateUrn().ToUtf8Bytes(); - values[i] = SerializeToUtf8Bytes(entitiesList[i]); - } - - base.MSet(keys, values); - RegisterTypeIds(entitiesList); - } - - public void WriteAll(IEnumerable entities) - { - if (entities == null) return; - - var entitiesList = entities.ToList(); - var len = entitiesList.Count; - - var keys = new byte[len][]; - var values = new byte[len][]; - - for (var i = 0; i < len; i++) - { - keys[i] = entitiesList[i].CreateUrn().ToUtf8Bytes(); - values[i] = SerializeToUtf8Bytes(entitiesList[i]); - } - - base.MSet(keys, values); - } - - public static byte[] SerializeToUtf8Bytes(T value) - { - 
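// JSON-serialize the value and encode it as UTF-8, matching the byte[][] shape
// that StoreAll/WriteAll pass to MSET above.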
return Encoding.UTF8.GetBytes(JsonSerializer.SerializeToString(value)); - } - - public void Delete(T entity) - where T : class, new() - { - var urnKey = entity.CreateUrn(); - this.Remove(urnKey); - this.RemoveTypeIds(entity); - } - - public void DeleteById(object id) where T : class, new() - { - var urnKey = IdUtils.CreateUrn(id); - this.Remove(urnKey); - this.RemoveTypeIds(id.ToString()); - } - - public void DeleteByIds(ICollection ids) where T : class, new() - { - if (ids == null) return; - - var urnKeys = ids.ConvertAll(x => IdUtils.CreateUrn(x)); - this.RemoveEntry(urnKeys.ToArray()); - this.RemoveTypeIds(ids.ConvertAll(x => x.ToString()).ToArray()); - } - - public void DeleteAll() where T : class, new() - { - var typeIdsSetKey = this.GetTypeIdsSetKey(); - var ids = this.GetAllItemsFromSet(typeIdsSetKey); - var urnKeys = ids.ConvertAll(x => IdUtils.CreateUrn(x)); - this.RemoveEntry(urnKeys.ToArray()); - this.Remove(typeIdsSetKey); - } - - #endregion - } - -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientFactory.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientFactory.cs deleted file mode 100644 index 85f8ef39..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientFactory.cs +++ /dev/null @@ -1,30 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System.Net; - -namespace ServiceStack.Redis -{ - /// - /// Provide the default factory implementation for creating a RedisClient that - /// can be mocked and used by different 'Redis Client Managers' - /// - public class RedisClientFactory : IRedisClientFactory - { - public static RedisClientFactory Instance = new RedisClientFactory(); - - public RedisClient CreateRedisClient(string host, int port) - { - return new RedisClient(host, port); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientHash.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientHash.cs deleted file mode 100644 index bfba4ec3..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientHash.cs +++ /dev/null @@ -1,162 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; - -namespace ServiceStack.Redis -{ - /// - /// Wrap the common redis set operations under a ICollection[string] interface. 
- /// - internal class RedisClientHash - : IRedisHash - { - private readonly RedisClient client; - private readonly string hashId; - - public RedisClientHash(RedisClient client, string hashId) - { - this.client = client; - this.hashId = hashId; - } - - public IEnumerator> GetEnumerator() - { - return client.GetAllEntriesFromHash(hashId).GetEnumerator(); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(KeyValuePair item) - { - client.SetEntryInHash(hashId, item.Key, item.Value); - } - - public bool AddIfNotExists(KeyValuePair item) - { - return client.SetEntryInHashIfNotExists(hashId, item.Key, item.Value); - } - - public void AddRange(IEnumerable> items) - { - client.SetRangeInHash(hashId, items); - } - - public int IncrementValue(string key, int incrementBy) - { - return client.IncrementValueInHash(hashId, key, incrementBy); - } - - public void Clear() - { - client.Remove(hashId); - } - - public bool Contains(KeyValuePair item) - { - var itemValue = client.GetValueFromHash(hashId, item.Key); - return itemValue == item.Value; - } - - public void CopyTo(KeyValuePair[] array, int arrayIndex) - { - var allItemsInHash = client.GetAllEntriesFromHash(hashId); - - var i = arrayIndex; - foreach (var item in allItemsInHash) - { - if (i >= array.Length) return; - array[i++] = item; - } - } - - public bool Remove(KeyValuePair item) - { - if (Contains(item)) - { - client.RemoveEntryFromHash(hashId, item.Key); - return true; - } - return false; - } - - public int Count - { - get { return client.GetHashCount(hashId); } - } - - public bool IsReadOnly - { - get { return false; } - } - - public bool ContainsKey(string key) - { - return client.HashContainsEntry(hashId, key); - } - - public void Add(string key, string value) - { - client.SetEntryInHash(hashId, key, value); - } - - public bool Remove(string key) - { - return client.RemoveEntryFromHash(hashId, key); - } - - public bool TryGetValue(string key, out string value) - { - value = client.GetValueFromHash(hashId, key); - return value != null; - } - - public string this[string key] - { - get - { - return client.GetValueFromHash(hashId, key); - } - set - { - client.SetEntryInHash(hashId, key, value); - } - } - - public ICollection Keys - { - get - { - return client.GetHashKeys(hashId); - } - } - - public ICollection Values - { - get - { - return client.GetHashValues(hashId); - } - } - - public string Id - { - get { return hashId; } - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientList.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientList.cs deleted file mode 100644 index 8f77a250..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientList.cs +++ /dev/null @@ -1,236 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections; -using System.Collections.Generic; - -namespace ServiceStack.Redis -{ - /// - /// Wrap the common redis list operations under a IList[string] interface. 
- /// - internal class RedisClientList - : IRedisList - { - private readonly RedisClient client; - private readonly string listId; - private const int PageLimit = 1000; - - public RedisClientList(RedisClient client, string listId) - { - this.listId = listId; - this.client = client; - } - - public string Id - { - get { return listId; } - } - - public IEnumerator GetEnumerator() - { - return this.Count <= PageLimit - ? client.GetAllItemsFromList(listId).GetEnumerator() - : GetPagingEnumerator(); - } - - public IEnumerator GetPagingEnumerator() - { - var skip = 0; - List pageResults; - do - { - pageResults = client.GetRangeFromList(listId, skip, skip + PageLimit - 1); - foreach (var result in pageResults) - { - yield return result; - } - skip += PageLimit; - } while (pageResults.Count == PageLimit); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(string item) - { - client.AddItemToList(listId, item); - } - - public void Clear() - { - client.RemoveAllFromList(listId); - } - - public bool Contains(string item) - { - //TODO: replace with native implementation when exists - foreach (var existingItem in this) - { - if (existingItem == item) return true; - } - return false; - } - - public void CopyTo(string[] array, int arrayIndex) - { - var allItemsInList = client.GetAllItemsFromList(listId); - allItemsInList.CopyTo(array, arrayIndex); - } - - public bool Remove(string item) - { - return client.RemoveItemFromList(listId, item) > 0; - } - - public int Count - { - get - { - return client.GetListCount(listId); - } - } - - public bool IsReadOnly { get { return false; } } - - public int IndexOf(string item) - { - //TODO: replace with native implementation when exists - var i = 0; - foreach (var existingItem in this) - { - if (existingItem == item) return i; - i++; - } - return -1; - } - - public void Insert(int index, string item) - { - //TODO: replace with implementation involving creating on new temp list then replacing - //otherwise wait for native implementation - throw new NotImplementedException(); - } - - public void RemoveAt(int index) - { - //TODO: replace with native implementation when one exists - var markForDelete = Guid.NewGuid().ToString(); - client.SetItemInList(listId, index, markForDelete); - client.RemoveItemFromList(listId, markForDelete); - } - - public string this[int index] - { - get { return client.GetItemFromList(listId, index); } - set { client.SetItemInList(listId, index, value); } - } - - public List GetAll() - { - return client.GetAllItemsFromList(listId); - } - - public List GetRange(int startingFrom, int endingAt) - { - return client.GetRangeFromList(listId, startingFrom, endingAt); - } - - public List GetRangeFromSortedList(int startingFrom, int endingAt) - { - return client.GetRangeFromSortedList(listId, startingFrom, endingAt); - } - - public void RemoveAll() - { - client.RemoveAllFromList(listId); - } - - public void Trim(int keepStartingFrom, int keepEndingAt) - { - client.TrimList(listId, keepStartingFrom, keepEndingAt); - } - - public int RemoveValue(string value) - { - return client.RemoveItemFromList(listId, value); - } - - public int RemoveValue(string value, int noOfMatches) - { - return client.RemoveItemFromList(listId, value, noOfMatches); - } - - public void Append(string value) - { - Add(value); - } - - public string RemoveStart() - { - return client.RemoveStartFromList(listId); - } - - public string BlockingRemoveStart(TimeSpan? 
timeOut) - { - return client.BlockingRemoveStartFromList(listId, timeOut); - } - - public string RemoveEnd() - { - return client.RemoveEndFromList(listId); - } - - public void Enqueue(string value) - { - client.EnqueueItemOnList(listId, value); - } - - public void Prepend(string value) - { - client.PrependItemToList(listId, value); - } - - public void Push(string value) - { - client.PushItemToList(listId, value); - } - - public string Pop() - { - return client.PopItemFromList(listId); - } - - public string BlockingPop(TimeSpan? timeOut) - { - return client.BlockingPopItemFromList(listId, timeOut); - } - - public string Dequeue() - { - return client.DequeueItemFromList(listId); - } - - public string BlockingDequeue(TimeSpan? timeOut) - { - return client.BlockingDequeueItemFromList(listId, timeOut); - } - - public string PopAndPush(IRedisList toList) - { - return client.PopAndPushItemBetweenLists(listId, toList.Id); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientManagerConfig.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientManagerConfig.cs deleted file mode 100644 index 044b8e4f..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientManagerConfig.cs +++ /dev/null @@ -1,27 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -namespace ServiceStack.Redis -{ - public class RedisClientManagerConfig - { - public RedisClientManagerConfig() - { - AutoStart = true; //Simplifies the most common use-case - registering in an IOC - } - - public int? DefaultDb { get; set; } - public int MaxReadPoolSize { get; set; } - public int MaxWritePoolSize { get; set; } - public bool AutoStart { get; set; } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientSet.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientSet.cs deleted file mode 100644 index f5128008..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientSet.cs +++ /dev/null @@ -1,173 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; - -namespace ServiceStack.Redis -{ - /// - /// Wrap the common redis set operations under a ICollection[string] interface. - /// - internal class RedisClientSet - : IRedisSet - { - private readonly RedisClient client; - private readonly string setId; - private const int PageLimit = 1000; - - public RedisClientSet(RedisClient client, string setId) - { - this.client = client; - this.setId = setId; - } - - public IEnumerator GetEnumerator() - { - return this.Count <= PageLimit - ? 
client.GetAllItemsFromSet(setId).GetEnumerator() - : GetPagingEnumerator(); - } - - public IEnumerator GetPagingEnumerator() - { - var skip = 0; - List pageResults; - do - { - pageResults = client.GetSortedEntryValues(setId, skip, skip + PageLimit - 1); - foreach (var result in pageResults) - { - yield return result; - } - skip += PageLimit; - } while (pageResults.Count == PageLimit); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(string item) - { - client.AddItemToSet(setId, item); - } - - public void Clear() - { - client.Remove(setId); - } - - public bool Contains(string item) - { - return client.SetContainsItem(setId, item); - } - - public void CopyTo(string[] array, int arrayIndex) - { - var allItemsInSet = client.GetAllItemsFromSet(setId); - allItemsInSet.CopyTo(array, arrayIndex); - } - - public bool Remove(string item) - { - client.RemoveItemFromSet(setId, item); - return true; - } - - public int Count - { - get - { - return client.GetSetCount(setId); - } - } - - public bool IsReadOnly { get { return false; } } - - public string Id - { - get { return this.setId; } - } - - public List GetRangeFromSortedSet(int startingFrom, int endingAt) - { - return client.GetSortedEntryValues(setId, startingFrom, endingAt); - } - - public HashSet GetAll() - { - return client.GetAllItemsFromSet(setId); - } - - public string Pop() - { - return client.PopItemFromSet(setId); - } - - public void Move(string value, IRedisSet toSet) - { - client.MoveBetweenSets(setId, toSet.Id, value); - } - - private List MergeSetIds(IRedisSet[] withSets) - { - var allSetIds = new List { setId }; - allSetIds.AddRange(withSets.ToList().ConvertAll(x => x.Id)); - return allSetIds; - } - - public HashSet Intersect(params IRedisSet[] withSets) - { - var allSetIds = MergeSetIds(withSets); - return client.GetIntersectFromSets(allSetIds.ToArray()); - } - - public void StoreIntersect(params IRedisSet[] withSets) - { - var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); - client.StoreIntersectFromSets(setId, withSetIds); - } - - public HashSet Union(params IRedisSet[] withSets) - { - var allSetIds = MergeSetIds(withSets); - return client.GetUnionFromSets(allSetIds.ToArray()); - } - - public void StoreUnion(params IRedisSet[] withSets) - { - var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); - client.StoreUnionFromSets(setId, withSetIds); - } - - public HashSet Diff(IRedisSet[] withSets) - { - var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); - return client.GetDifferencesFromSet(setId, withSetIds); - } - - public void StoreDiff(IRedisSet fromSet, params IRedisSet[] withSets) - { - var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); - client.StoreDifferencesFromSet(setId, fromSet.Id, withSetIds); - } - - public string GetRandomEntry() - { - return client.GetRandomItemFromSet(setId); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientSortedSet.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientSortedSet.cs deleted file mode 100644 index c55d9d29..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClientSortedSet.cs +++ /dev/null @@ -1,181 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. 
-// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; - -namespace ServiceStack.Redis -{ - /// - /// Wrap the common redis set operations under a ICollection[string] interface. - /// - internal class RedisClientSortedSet - : IRedisSortedSet - { - private readonly RedisClient client; - private readonly string setId; - private const int PageLimit = 1000; - - public RedisClientSortedSet(RedisClient client, string setId) - { - this.client = client; - this.setId = setId; - } - - public IEnumerator GetEnumerator() - { - return this.Count <= PageLimit - ? client.GetAllItemsFromSortedSet(setId).GetEnumerator() - : GetPagingEnumerator(); - } - - public IEnumerator GetPagingEnumerator() - { - var skip = 0; - List pageResults; - do - { - pageResults = client.GetRangeFromSortedSet(setId, skip, skip + PageLimit - 1); - foreach (var result in pageResults) - { - yield return result; - } - skip += PageLimit; - } while (pageResults.Count == PageLimit); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return GetEnumerator(); - } - - public void Add(string item) - { - client.AddItemToSortedSet(setId, item); - } - - public void Clear() - { - client.Remove(setId); - } - - public bool Contains(string item) - { - return client.SortedSetContainsItem(setId, item); - } - - public void CopyTo(string[] array, int arrayIndex) - { - var allItemsInSet = client.GetAllItemsFromSortedSet(setId); - allItemsInSet.CopyTo(array, arrayIndex); - } - - public bool Remove(string item) - { - client.RemoveItemFromSortedSet(setId, item); - return true; - } - - public int Count - { - get - { - return client.GetSortedSetCount(setId); - } - } - - public bool IsReadOnly { get { return false; } } - - public string Id - { - get { return this.setId; } - } - - public List GetAll() - { - return client.GetAllItemsFromSortedSet(setId); - } - - public List GetRange(int startingRank, int endingRank) - { - return client.GetRangeFromSortedSet(setId, startingRank, endingRank); - } - - public List GetRangeByScore(string fromStringScore, string toStringScore) - { - return GetRangeByScore(fromStringScore, toStringScore, null, null); - } - - public List GetRangeByScore(string fromStringScore, string toStringScore, int? skip, int? take) - { - return client.GetRangeFromSortedSetByLowestScore(setId, fromStringScore, toStringScore, skip, take); - } - - public List GetRangeByScore(double fromScore, double toScore) - { - return GetRangeByScore(fromScore, toScore, null, null); - } - - public List GetRangeByScore(double fromScore, double toScore, int? skip, int? 
take) - { - return client.GetRangeFromSortedSetByLowestScore(setId, fromScore, toScore, skip, take); - } - - public void RemoveRange(int startingFrom, int toRank) - { - client.RemoveRangeFromSortedSet(setId, startingFrom, toRank); - } - - public void RemoveRangeByScore(double fromScore, double toScore) - { - client.RemoveRangeFromSortedSetByScore(setId, fromScore, toScore); - } - - public void StoreFromIntersect(params IRedisSortedSet[] ofSets) - { - client.StoreIntersectFromSets(setId, ofSets.GetIds()); - } - - public void StoreFromUnion(params IRedisSortedSet[] ofSets) - { - client.StoreUnionFromSets(setId, ofSets.GetIds()); - } - - public int GetItemIndex(string value) - { - return client.GetItemIndexInSortedSet(setId, value); - } - - public double GetItemScore(string value) - { - return client.GetItemScoreInSortedSet(setId, value); - } - - public string PopItemWithLowestScore() - { - return client.PopItemWithLowestScoreFromSortedSet(setId); - } - - public string PopItemWithHighestScore() - { - return client.PopItemWithHighestScoreFromSortedSet(setId); - } - - public void IncrementItemScore(string value, double incrementByScore) - { - client.IncrementItemInSortedSet(setId, value, incrementByScore); - } - - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_Hash.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_Hash.cs deleted file mode 100644 index 40c48731..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_Hash.cs +++ /dev/null @@ -1,135 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
-// - -using System; -using System.Collections.Generic; -using System.Linq; -using ServiceStack.DesignPatterns.Model; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public partial class RedisClient - : IRedisClient - { - public IHasNamed Hashes { get; set; } - - internal class RedisClientHashes - : IHasNamed - { - private readonly RedisClient client; - - public RedisClientHashes(RedisClient client) - { - this.client = client; - } - - public IRedisHash this[string hashId] - { - get - { - return new RedisClientHash(client, hashId); - } - set - { - var hash = this[hashId]; - hash.Clear(); - hash.CopyTo(value.ToArray(), 0); - } - } - } - - public bool SetEntryInHash(string hashId, string key, string value) - { - return base.HSet(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes()) == Success; - } - - public bool SetEntryInHashIfNotExists(string hashId, string key, string value) - { - return base.HSetNX(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes()) == Success; - } - - public void SetRangeInHash(string hashId, IEnumerable> keyValuePairs) - { - var keyValuePairsList = keyValuePairs.ToList(); - var keys = new byte[keyValuePairsList.Count][]; - var values = new byte[keyValuePairsList.Count][]; - - for (var i = 0; i < keyValuePairsList.Count; i++) - { - var kvp = keyValuePairsList[i]; - keys[i] = kvp.Key.ToUtf8Bytes(); - values[i] = kvp.Value.ToUtf8Bytes(); - } - - base.HMSet(hashId, keys, values); - } - - public int IncrementValueInHash(string hashId, string key, int incrementBy) - { - return base.HIncrby(hashId, key.ToUtf8Bytes(), incrementBy); - } - - public string GetValueFromHash(string hashId, string key) - { - return base.HGet(hashId, key.ToUtf8Bytes()).FromUtf8Bytes(); - } - - public bool HashContainsEntry(string hashId, string key) - { - return base.HExists(hashId, key.ToUtf8Bytes()) == Success; - } - - public bool RemoveEntryFromHash(string hashId, string key) - { - return base.HDel(hashId, key.ToUtf8Bytes()) == Success; - } - - public int GetHashCount(string hashId) - { - return base.HLen(hashId); - } - - public List GetHashKeys(string hashId) - { - var multiDataList = base.HKeys(hashId); - return multiDataList.ToStringList(); - } - - public List GetHashValues(string hashId) - { - var multiDataList = base.HVals(hashId); - return multiDataList.ToStringList(); - } - - public Dictionary GetAllEntriesFromHash(string hashId) - { - var multiDataList = base.HGetAll(hashId); - var map = new Dictionary(); - - for (var i = 0; i < multiDataList.Length; i += 2) - { - var key = multiDataList[i].FromUtf8Bytes(); - map[key] = multiDataList[i + 1].FromUtf8Bytes(); - } - - return map; - } - - public List GetValuesFromHash(string hashId, params string[] keys) - { - var keyBytes = ConvertToBytes(keys); - var multiDataList = base.HMGet(hashId, keyBytes); - return multiDataList.ToStringList(); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_List.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_List.cs deleted file mode 100644 index 4453b48c..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_List.cs +++ /dev/null @@ -1,200 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
-// - -using System; -using System.Collections.Generic; -using System.Linq; -using ServiceStack.DesignPatterns.Model; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public partial class RedisClient - : IRedisClient - { - const int FirstElement = 0; - const int LastElement = -1; - - public IHasNamed Lists { get; set; } - - internal class RedisClientLists - : IHasNamed - { - private readonly RedisClient client; - - public RedisClientLists(RedisClient client) - { - this.client = client; - } - - public IRedisList this[string listId] - { - get - { - return new RedisClientList(client, listId); - } - set - { - var list = this[listId]; - list.Clear(); - list.CopyTo(value.ToArray(), 0); - } - } - } - - public List GetAllItemsFromList(string listId) - { - var multiDataList = LRange(listId, FirstElement, LastElement); - return multiDataList.ToStringList(); - } - - public List GetRangeFromList(string listId, int startingFrom, int endingAt) - { - var multiDataList = LRange(listId, startingFrom, endingAt); - return multiDataList.ToStringList(); - } - - public List GetRangeFromSortedList(string listId, int startingFrom, int endingAt) - { - var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, SortAlpha = true }; - var multiDataList = Sort(listId, sortOptions); - return multiDataList.ToStringList(); - } - - public void AddItemToList(string listId, string value) - { - RPush(listId, value.ToUtf8Bytes()); - } - - public void AddRangeToList(string listId, List values) - { - var uListId = listId.ToUtf8Bytes(); - - var pipeline = CreatePipelineCommand(); - foreach (var value in values) - { - pipeline.WriteCommand(Commands.RPush, uListId, value.ToUtf8Bytes()); - } - pipeline.Flush(); - - //the number of items after - var intResults = pipeline.ReadAllAsInts(); - } - - public void PrependItemToList(string listId, string value) - { - LPush(listId, value.ToUtf8Bytes()); - } - - public void PrependRangeToList(string listId, List values) - { - var uListId = listId.ToUtf8Bytes(); - - var pipeline = CreatePipelineCommand(); - //ensure list[0] == value[0] after batch operation - for (var i = values.Count - 1; i >= 0; i--) - { - var value = values[i]; - pipeline.WriteCommand(Commands.LPush, uListId, value.ToUtf8Bytes()); - } - pipeline.Flush(); - - //the number of items after - var intResults = pipeline.ReadAllAsInts(); - } - - public void RemoveAllFromList(string listId) - { - LTrim(listId, LastElement, FirstElement); - } - - public string RemoveStartFromList(string listId) - { - return base.LPop(listId).FromUtf8Bytes(); - } - - public string BlockingRemoveStartFromList(string listId, TimeSpan? 
timeOut) - { - return base.BLPopValue(listId, (int)timeOut.GetValueOrDefault().TotalSeconds).FromUtf8Bytes(); - } - - public string RemoveEndFromList(string listId) - { - return base.RPop(listId).FromUtf8Bytes(); - } - - public void TrimList(string listId, int keepStartingFrom, int keepEndingAt) - { - LTrim(listId, keepStartingFrom, keepEndingAt); - } - - public int RemoveItemFromList(string listId, string value) - { - return LRem(listId, 0, value.ToUtf8Bytes()); - } - - public int RemoveItemFromList(string listId, string value, int noOfMatches) - { - return LRem(listId, noOfMatches, value.ToUtf8Bytes()); - } - - public int GetListCount(string listId) - { - return LLen(listId); - } - - public string GetItemFromList(string listId, int listIndex) - { - return LIndex(listId, listIndex).FromUtf8Bytes(); - } - - public void SetItemInList(string listId, int listIndex, string value) - { - LSet(listId, listIndex, value.ToUtf8Bytes()); - } - - public void EnqueueItemOnList(string listId, string value) - { - RPush(listId, value.ToUtf8Bytes()); - } - - public string DequeueItemFromList(string listId) - { - return LPop(listId).FromUtf8Bytes(); - } - - public string BlockingDequeueItemFromList(string listId, TimeSpan? timeOut) - { - return BLPopValue(listId, (int)timeOut.GetValueOrDefault().TotalSeconds).FromUtf8Bytes(); - } - - public void PushItemToList(string listId, string value) - { - RPush(listId, value.ToUtf8Bytes()); - } - - public string PopItemFromList(string listId) - { - return RPop(listId).FromUtf8Bytes(); - } - - public string BlockingPopItemFromList(string listId, TimeSpan? timeOut) - { - return BRPopValue(listId, (int)timeOut.GetValueOrDefault().TotalSeconds).FromUtf8Bytes(); - } - - public string PopAndPushItemBetweenLists(string fromListId, string toListId) - { - return RPopLPush(fromListId, toListId).FromUtf8Bytes(); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_Set.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_Set.cs deleted file mode 100644 index fa4079ec..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_Set.cs +++ /dev/null @@ -1,172 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
-// - -using System; -using System.Collections.Generic; -using System.Linq; -using ServiceStack.DesignPatterns.Model; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public partial class RedisClient - : IRedisClient - { - public IHasNamed Sets { get; set; } - - internal class RedisClientSets - : IHasNamed - { - private readonly RedisClient client; - - public RedisClientSets(RedisClient client) - { - this.client = client; - } - - public IRedisSet this[string setId] - { - get - { - return new RedisClientSet(client, setId); - } - set - { - var col = this[setId]; - col.Clear(); - col.CopyTo(value.ToArray(), 0); - } - } - } - - private static HashSet CreateHashSet(byte[][] multiDataList) - { - var results = new HashSet(); - foreach (var multiData in multiDataList) - { - results.Add(multiData.FromUtf8Bytes()); - } - return results; - } - - public List GetSortedEntryValues(string setId, int startingFrom, int endingAt) - { - var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; - var multiDataList = Sort(setId, sortOptions); - return multiDataList.ToStringList(); - } - - public HashSet GetAllItemsFromSet(string setId) - { - var multiDataList = SMembers(setId); - return CreateHashSet(multiDataList); - } - - public void AddItemToSet(string setId, string item) - { - SAdd(setId, item.ToUtf8Bytes()); - } - - public void AddRangeToSet(string setId, List items) - { - var uSetId = setId.ToUtf8Bytes(); - - var pipeline = CreatePipelineCommand(); - foreach (var item in items) - { - pipeline.WriteCommand(Commands.SAdd, uSetId, item.ToUtf8Bytes()); - } - pipeline.Flush(); - - //the number of items after - var intResults = pipeline.ReadAllAsInts(); - } - - public void RemoveItemFromSet(string setId, string item) - { - SRem(setId, item.ToUtf8Bytes()); - } - - public string PopItemFromSet(string setId) - { - return SPop(setId).FromUtf8Bytes(); - } - - public void MoveBetweenSets(string fromSetId, string toSetId, string item) - { - SMove(fromSetId, toSetId, item.ToUtf8Bytes()); - } - - public int GetSetCount(string setId) - { - return SCard(setId); - } - - public bool SetContainsItem(string setId, string item) - { - return SIsMember(setId, item.ToUtf8Bytes()) == 1; - } - - public HashSet GetIntersectFromSets(params string[] setIds) - { - if (setIds.Length == 0) - return new HashSet(); - - var multiDataList = SInter(setIds); - return CreateHashSet(multiDataList); - } - - public void StoreIntersectFromSets(string intoSetId, params string[] setIds) - { - if (setIds.Length == 0) return; - - SInterStore(intoSetId, setIds); - } - - public HashSet GetUnionFromSets(params string[] setIds) - { - if (setIds.Length == 0) - return new HashSet(); - - var multiDataList = SUnion(setIds); - return CreateHashSet(multiDataList); - } - - public void StoreUnionFromSets(string intoSetId, params string[] setIds) - { - if (setIds.Length == 0) return; - - SUnionStore(intoSetId, setIds); - } - - public HashSet GetDifferencesFromSet(string fromSetId, params string[] withSetIds) - { - if (withSetIds.Length == 0) - return new HashSet(); - - var multiDataList = SDiff(fromSetId, withSetIds); - return CreateHashSet(multiDataList); - } - - public void StoreDifferencesFromSet(string intoSetId, string fromSetId, params string[] withSetIds) - { - if (withSetIds.Length == 0) return; - - SDiffStore(intoSetId, fromSetId, withSetIds); - } - - public string GetRandomItemFromSet(string setId) - { - return SRandMember(setId).FromUtf8Bytes(); - } - } -} \ No newline at end of file diff --git 
a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_SortedSet.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_SortedSet.cs deleted file mode 100644 index c8c16a07..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisClient_SortedSet.cs +++ /dev/null @@ -1,327 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using System.Linq; -using ServiceStack.DesignPatterns.Model; -using ServiceStack.Redis.Support; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public partial class RedisClient : IRedisClient - { - public IHasNamed SortedSets { get; set; } - - internal class RedisClientSortedSets - : IHasNamed - { - private readonly RedisClient client; - - public RedisClientSortedSets(RedisClient client) - { - this.client = client; - } - - public IRedisSortedSet this[string setId] - { - get - { - return new RedisClientSortedSet(client, setId); - } - set - { - var col = this[setId]; - col.Clear(); - col.CopyTo(value.ToArray(), 0); - } - } - } - - public static double GetLexicalScore(string value) - { - if (string.IsNullOrEmpty(value)) - return 0; - - var lexicalValue = 0; - if (value.Length >= 1) - lexicalValue += value[0] * (int)Math.Pow(256, 3); - - if (value.Length >= 2) - lexicalValue += value[1] * (int)Math.Pow(256, 2); - - if (value.Length >= 3) - lexicalValue += value[2] * (int)Math.Pow(256, 1); - - if (value.Length >= 4) - lexicalValue += value[3]; - - return lexicalValue; - } - - public bool AddItemToSortedSet(string setId, string value) - { - return AddItemToSortedSet(setId, value, GetLexicalScore(value)); - } - - public bool AddItemToSortedSet(string setId, string value, double score) - { - return base.ZAdd(setId, score, value.ToUtf8Bytes()) == Success; - } - - public bool AddRangeToSortedSet(string setId, List values, double score) - { - var pipeline = CreatePipelineCommand(); - var uSetId = setId.ToUtf8Bytes(); - var uScore = score.ToUtf8Bytes(); - - foreach (var value in values) - { - pipeline.WriteCommand(Commands.ZAdd, uSetId, uScore, value.ToUtf8Bytes()); - } - - pipeline.Flush(); - - var success = pipeline.ReadAllAsIntsHaveSuccess(); - return success; - } - - public bool RemoveItemFromSortedSet(string setId, string value) - { - return base.ZRem(setId, value.ToUtf8Bytes()) == Success; - } - - public string PopItemWithLowestScoreFromSortedSet(string setId) - { - //TODO: this should be atomic - var topScoreItemBytes = base.ZRange(setId, FirstElement, 1); - if (topScoreItemBytes.Length == 0) return null; - - base.ZRem(setId, topScoreItemBytes[0]); - return topScoreItemBytes[0].FromUtf8Bytes(); - } - - public string PopItemWithHighestScoreFromSortedSet(string setId) - { - //TODO: this should be atomic - var topScoreItemBytes = base.ZRevRange(setId, FirstElement, 1); - if (topScoreItemBytes.Length == 0) return null; - - base.ZRem(setId, topScoreItemBytes[0]); - return topScoreItemBytes[0].FromUtf8Bytes(); - } - - public bool SortedSetContainsItem(string setId, string value) - { - return base.ZRank(setId, value.ToUtf8Bytes()) != -1; - } - - public double IncrementItemInSortedSet(string setId, string value, double incrementBy) - { - return base.ZIncrBy(setId, incrementBy, value.ToUtf8Bytes()); - } 
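// Worked example of the lexical scoring used by AddItemToSortedSet(setId, value) above:
// GetLexicalScore packs the first four characters into a base-256 number, so
//   GetLexicalScore("abcd") = 'a'*256^3 + 'b'*256^2 + 'c'*256 + 'd'
//                           = 97*16777216 + 98*65536 + 99*256 + 100
//                           = 1633837924
// which is how the string-score overloads of GetRangeFromSortedSetBy*Score approximate
// a lexicographic range query over the first four characters of each entry.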
- - public int GetItemIndexInSortedSet(string setId, string value) - { - return base.ZRank(setId, value.ToUtf8Bytes()); - } - - public int GetItemIndexInSortedSetDesc(string setId, string value) - { - return base.ZRevRank(setId, value.ToUtf8Bytes()); - } - - public List GetAllItemsFromSortedSet(string setId) - { - var multiDataList = base.ZRange(setId, FirstElement, LastElement); - return multiDataList.ToStringList(); - } - - public List GetAllItemsFromSortedSetDesc(string setId) - { - var multiDataList = base.ZRevRange(setId, FirstElement, LastElement); - return multiDataList.ToStringList(); - } - - public List GetRangeFromSortedSet(string setId, int fromRank, int toRank) - { - var multiDataList = base.ZRange(setId, fromRank, toRank); - return multiDataList.ToStringList(); - } - - public List GetRangeFromSortedSetDesc(string setId, int fromRank, int toRank) - { - var multiDataList = base.ZRevRange(setId, fromRank, toRank); - return multiDataList.ToStringList(); - } - - public IDictionary GetAllWithScoresFromSortedSet(string setId) - { - var multiDataList = base.ZRangeWithScores(setId, FirstElement, LastElement); - return CreateSortedScoreMap(multiDataList); - } - - public IDictionary GetRangeWithScoresFromSortedSet(string setId, int fromRank, int toRank) - { - var multiDataList = base.ZRangeWithScores(setId, fromRank, toRank); - return CreateSortedScoreMap(multiDataList); - } - - public IDictionary GetRangeWithScoresFromSortedSetDesc(string setId, int fromRank, int toRank) - { - var multiDataList = base.ZRevRangeWithScores(setId, fromRank, toRank); - return CreateSortedScoreMap(multiDataList); - } - - private static IDictionary CreateSortedScoreMap(byte[][] multiDataList) - { - var map = new OrderedDictionary(); - - for (var i = 0; i < multiDataList.Length; i += 2) - { - var key = multiDataList[i].FromUtf8Bytes(); - double value; - double.TryParse(multiDataList[i + 1].FromUtf8Bytes(), out value); - map[key] = value; - } - - return map; - } - - - public List GetRangeFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore) - { - return GetRangeFromSortedSetByLowestScore(setId, fromStringScore, toStringScore, null, null); - } - - public List GetRangeFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take) - { - var fromScore = GetLexicalScore(fromStringScore); - var toScore = GetLexicalScore(toStringScore); - return GetRangeFromSortedSetByLowestScore(setId, fromScore, toScore, skip, take); - } - - public List GetRangeFromSortedSetByLowestScore(string setId, double fromScore, double toScore) - { - return GetRangeFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); - } - - public List GetRangeFromSortedSetByLowestScore(string setId, double fromScore, double toScore, int? skip, int? take) - { - var multiDataList = base.ZRangeByScore(setId, fromScore, toScore, skip, take); - return multiDataList.ToStringList(); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore) - { - return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromStringScore, toStringScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? 
take) - { - var fromScore = GetLexicalScore(fromStringScore); - var toScore = GetLexicalScore(toStringScore); - return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromScore, toScore, skip, take); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, double fromScore, double toScore) - { - return GetRangeWithScoresFromSortedSetByLowestScore(setId, fromScore, toScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByLowestScore(string setId, double fromScore, double toScore, int? skip, int? take) - { - var multiDataList = base.ZRangeByScoreWithScores(setId, fromScore, toScore, skip, take); - return CreateSortedScoreMap(multiDataList); - } - - - public List GetRangeFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore) - { - return GetRangeFromSortedSetByHighestScore(setId, fromStringScore, toStringScore, null, null); - } - - public List GetRangeFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take) - { - var fromScore = GetLexicalScore(fromStringScore); - var toScore = GetLexicalScore(toStringScore); - return GetRangeFromSortedSetByHighestScore(setId, fromScore, toScore, skip, take); - } - - public List GetRangeFromSortedSetByHighestScore(string setId, double fromScore, double toScore) - { - return GetRangeFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); - } - - public List GetRangeFromSortedSetByHighestScore(string setId, double fromScore, double toScore, int? skip, int? take) - { - var multiDataList = base.ZRevRangeByScore(setId, fromScore, toScore, skip, take); - return multiDataList.ToStringList(); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore) - { - return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromStringScore, toStringScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, string fromStringScore, string toStringScore, int? skip, int? take) - { - var fromScore = GetLexicalScore(fromStringScore); - var toScore = GetLexicalScore(toStringScore); - return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromScore, toScore, skip, take); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, double fromScore, double toScore) - { - return GetRangeWithScoresFromSortedSetByHighestScore(setId, fromScore, toScore, null, null); - } - - public IDictionary GetRangeWithScoresFromSortedSetByHighestScore(string setId, double fromScore, double toScore, int? skip, int? 
take) - { - var multiDataList = base.ZRevRangeByScoreWithScores(setId, fromScore, toScore, skip, take); - return CreateSortedScoreMap(multiDataList); - } - - - - public int RemoveRangeFromSortedSet(string setId, int minRank, int maxRank) - { - return base.ZRemRangeByRank(setId, minRank, maxRank); - } - - public int RemoveRangeFromSortedSetByScore(string setId, double fromScore, double toScore) - { - return base.ZRemRangeByScore(setId, fromScore, toScore); - } - - public int GetSortedSetCount(string setId) - { - return base.ZCard(setId); - } - - public double GetItemScoreInSortedSet(string setId, string value) - { - return base.ZScore(setId, value.ToUtf8Bytes()); - } - - public int StoreIntersectFromSortedSets(string intoSetId, params string[] setIds) - { - return base.ZInterStore(intoSetId, setIds); - } - - public int StoreUnionFromSortedSets(string intoSetId, params string[] setIds) - { - return base.ZUnionStore(intoSetId, setIds); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisException.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisException.cs deleted file mode 100644 index ad716350..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisException.cs +++ /dev/null @@ -1,18 +0,0 @@ -using System; - -namespace ServiceStack.Redis -{ - public class RedisException - : Exception - { - public RedisException(string message) - : base(message) - { - } - - public RedisException(string message, string code) - : base(message) - { - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisExtensions.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisExtensions.cs deleted file mode 100644 index 08a11512..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisExtensions.cs +++ /dev/null @@ -1,82 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; -using System.Net.Sockets; -using System.Text; -using ServiceStack.Common.Web; -using ServiceStack.DesignPatterns.Model; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - internal static class RedisExtensions - { - public static List ToIpEndPoints(this IEnumerable hosts) - { - if (hosts == null) return new List(); - - const int hostOrIpAddressIndex = 0; - const int portIndex = 1; - - var ipEndpoints = new List(); - foreach (var host in hosts) - { - var hostParts = host.Split(':'); - if (hostParts.Length == 0) - throw new ArgumentException("'{0}' is not a valid Host or IP Address: e.g. '127.0.0.0[:11211]'"); - - var port = (hostParts.Length == 1) - ? 
RedisNativeClient.DefaultPort : int.Parse(hostParts[portIndex]); - - var endpoint = new EndPoint(hostParts[hostOrIpAddressIndex], port); - ipEndpoints.Add(endpoint); - } - return ipEndpoints; - } - - public static bool IsConnected(this Socket socket) - { - try - { - return !(socket.Poll(1, SelectMode.SelectRead) && socket.Available == 0); - } - catch (SocketException) { return false; } - } - - - public static string[] GetIds(this IHasStringId[] itemsWithId) - { - var ids = new string[itemsWithId.Length]; - for (var i = 0; i < itemsWithId.Length; i++) - { - ids[i] = itemsWithId[i].Id; - } - return ids; - } - - public static List ToStringList(this byte[][] multiDataList) - { - if (multiDataList == null) - return new List(); - - var results = new List(); - foreach (var multiData in multiDataList) - { - results.Add(multiData.FromUtf8Bytes()); - } - return results; - } - } - -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisKeyType.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisKeyType.cs deleted file mode 100644 index 7ab488c6..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisKeyType.cs +++ /dev/null @@ -1,24 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -namespace ServiceStack.Redis -{ - public enum RedisKeyType - { - None, - String, - List, - Set, - SortedSet, - Hash - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisLock.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisLock.cs deleted file mode 100644 index cda65c96..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisLock.cs +++ /dev/null @@ -1,29 +0,0 @@ -using System; -using ServiceStack.Common.Extensions; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public class RedisLock - : IDisposable - { - private readonly RedisClient redisClient; - private readonly string key; - - public RedisLock(RedisClient redisClient, string key, TimeSpan? timeOut) - { - this.redisClient = redisClient; - this.key = key; - - ExecExtensions.RetryUntilTrue( - () => redisClient.SetEntryIfNotExists(key, "lock " + DateTime.UtcNow.ToUnixTime()), - timeOut - ); - } - - public void Dispose() - { - redisClient.Remove(key); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisNativeClient.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisNativeClient.cs deleted file mode 100644 index b80c0ff6..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisNativeClient.cs +++ /dev/null @@ -1,1204 +0,0 @@ -// -// redis-sharp.cs: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Miguel de Icaza (miguel@gnome.org) -// -// Copyright 2010 Novell, Inc. -// -// Licensed under the same terms of Redis: new BSD license. -// -//#define DEBUG - -using System; -using System.IO; -using System.Collections.Generic; -using System.Net.Sockets; -using System.Text; -using ServiceStack.Logging; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - /// - /// This class contains all the common operations for the RedisClient. - /// The client contains a 1:1 mapping of c# methods to redis operations of the same name. 
- /// - /// Not threadsafe use a pooled manager - /// - public partial class RedisNativeClient - : IRedisNativeClient - { - private static readonly ILog log = LogManager.GetLogger(typeof(RedisNativeClient)); - - public const int DefaultDb = 0; - public const int DefaultPort = 6379; - public const string DefaultHost = "localhost"; - - internal const int Success = 1; - internal const int OneGb = 1073741824; - private readonly byte[] endData = new[] { (byte)'\r', (byte)'\n' }; - - private int clientPort; - private string lastCommand; - private SocketException lastSocketException; - public bool HadExceptions { get; protected set; } - - protected Socket socket; - protected BufferedStream Bstream; - - /// - /// Used to manage connection pooling - /// - internal bool Active { get; set; } - internal PooledRedisClientManager ClientManager { get; set; } - - internal int IdleTimeOutSecs = 240; //default on redis is 300 - internal long LastConnectedAtTimestamp; - - public int Id { get; set; } - - public string Host { get; private set; } - public int Port { get; private set; } - public int RetryTimeout { get; set; } - public int RetryCount { get; set; } - public int SendTimeout { get; set; } - public string Password { get; set; } - - internal IRedisQueableTransaction CurrentTransaction { get; set; } - - public RedisNativeClient(string host) - : this(host, DefaultPort) - { - } - - public RedisNativeClient(string host, int port) - { - if (host == null) - throw new ArgumentNullException("host"); - - Host = host; - Port = port; - SendTimeout = -1; - } - - public RedisNativeClient() - : this(DefaultHost, DefaultPort) - { - } - - - - #region Common Operations - - int db; - public int Db - { - get - { - return db; - } - - set - { - db = value; - SendExpectSuccess(Commands.Select, db.ToUtf8Bytes()); - } - } - - public int DbSize - { - get - { - return SendExpectInt(Commands.DbSize); - } - } - - public DateTime LastSave - { - get - { - var t = SendExpectInt(Commands.LastSave); - return DateTimeExtensions.FromUnixTime(t); - } - } - - public Dictionary Info - { - get - { - var lines = SendExpectString(Commands.Info); - var dict = new Dictionary(); - - foreach (var line in lines - .Split(new[] { "\r\n" }, StringSplitOptions.RemoveEmptyEntries)) - { - var p = line.IndexOf(':'); - if (p == -1) continue; - - dict.Add(line.Substring(0, p), line.Substring(p + 1)); - } - return dict; - } - } - - public string ServerVersion - { - get - { - string version; - this.Info.TryGetValue("redis_version", out version); - return version; - } - } - - private bool? 
isPreVersion1_26; - protected bool IsPreVersion1_26 - { - get - { - if (isPreVersion1_26 == null) - { - isPreVersion1_26 = this.ServerVersion.CompareTo("1.2.6") <= 0; - } - return isPreVersion1_26.Value; - } - } - - public bool Ping() - { - return SendExpectCode(Commands.Ping) == "PONG"; - } - - public string Echo(string text) - { - return SendExpectData(Commands.Echo, text.ToUtf8Bytes()).FromUtf8Bytes(); - } - - public void SlaveOf(string hostname, int port) - { - SendExpectSuccess(Commands.SlaveOf, hostname.ToUtf8Bytes(), port.ToUtf8Bytes()); - } - - public void SlaveOfNoOne() - { - SendExpectSuccess(Commands.SlaveOf, Commands.No, Commands.One); - } - - public string Type(string key) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectCode(Commands.Type, key.ToUtf8Bytes()); - } - - public RedisKeyType GetEntryType(string key) - { - switch (Type(key)) - { - case "none": - return RedisKeyType.None; - case "string": - return RedisKeyType.String; - case "set": - return RedisKeyType.Set; - case "list": - return RedisKeyType.List; - case "zset": - return RedisKeyType.SortedSet; - case "hash": - return RedisKeyType.Hash; - } - throw CreateResponseError("Invalid value"); - } - - public void Set(string key, byte[] value) - { - if (key == null) - throw new ArgumentNullException("key"); - value = value ?? new byte[0]; - - if (value.Length > OneGb) - throw new ArgumentException("value exceeds 1G", "value"); - - SendExpectSuccess(Commands.Set, key.ToUtf8Bytes(), value); - } - - public void SetEx(string key, int expireInSeconds, byte[] value) - { - if (key == null) - throw new ArgumentNullException("key"); - value = value ?? new byte[0]; - - if (value.Length > OneGb) - throw new ArgumentException("value exceeds 1G", "value"); - - var doesNotSupportSetEx = this.ServerVersion.CompareTo("1.3.9") <= 0; - if (doesNotSupportSetEx) - { - SendExpectSuccess(Commands.Set, key.ToUtf8Bytes(), value); - SendExpectSuccess(Commands.Expire, key.ToUtf8Bytes(), expireInSeconds.ToUtf8Bytes()); - return; - } - - SendExpectSuccess(Commands.SetEx, key.ToUtf8Bytes(), expireInSeconds.ToUtf8Bytes(), value); - } - - public int SetNX(string key, byte[] value) - { - if (key == null) - throw new ArgumentNullException("key"); - value = value ?? new byte[0]; - - if (value.Length > OneGb) - throw new ArgumentException("value exceeds 1G", "value"); - - return SendExpectInt(Commands.SetNx, key.ToUtf8Bytes(), value); - } - - public void MSet(byte[][] keys, byte[][] values) - { - var keysAndValues = MergeCommandWithKeysAndValues(Commands.MSet, keys, values); - - SendExpectSuccess(keysAndValues); - } - - public byte[] Get(string key) - { - return GetBytes(key); - } - - public byte[] GetBytes(string key) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectData(Commands.Get, key.ToUtf8Bytes()); - } - - public byte[] GetSet(string key, byte[] value) - { - if (key == null) - throw new ArgumentNullException("key"); - - value = value ?? 
new byte[0]; - - if (value.Length > OneGb) - throw new ArgumentException("value exceeds 1G", "value"); - - return SendExpectData(Commands.GetSet, key.ToUtf8Bytes(), value); - } - - public int Exists(string key) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.Exists, key.ToUtf8Bytes()); - } - - public int Del(string key) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.Del, key.ToUtf8Bytes()); - } - - public int Del(params string[] keys) - { - if (keys == null) - throw new ArgumentNullException("keys"); - - var cmdWithArgs = MergeCommandWithArgs(Commands.Del, keys); - return SendExpectInt(cmdWithArgs); - } - - public int Incr(string key) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.Incr, key.ToUtf8Bytes()); - } - - public int IncrBy(string key, int count) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.IncrBy, key.ToUtf8Bytes(), count.ToUtf8Bytes()); - } - - public int Decr(string key) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.Decr, key.ToUtf8Bytes()); - } - - public int DecrBy(string key, int count) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.DecrBy, key.ToUtf8Bytes(), count.ToUtf8Bytes()); - } - - public int Append(string key, byte[] value) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.Append, key.ToUtf8Bytes(), value); - } - - public byte[] Substr(string key, int fromIndex, int toIndex) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectData(Commands.Substr, key.ToUtf8Bytes(), fromIndex.ToUtf8Bytes(), toIndex.ToUtf8Bytes()); - } - - public string RandomKey() - { - return SendExpectData(Commands.RandomKey).FromUtf8Bytes(); - } - - public void Rename(string oldKeyname, string newKeyname) - { - if (oldKeyname == null) - throw new ArgumentNullException("oldKeyname"); - if (newKeyname == null) - throw new ArgumentNullException("newKeyname"); - - SendExpectSuccess(Commands.Rename, oldKeyname.ToUtf8Bytes(), newKeyname.ToUtf8Bytes()); - } - - public int Expire(string key, int seconds) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.Expire, key.ToUtf8Bytes(), seconds.ToUtf8Bytes()); - } - - public int ExpireAt(string key, long unixTime) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.ExpireAt, key.ToUtf8Bytes(), unixTime.ToUtf8Bytes()); - } - - public int Ttl(string key) - { - if (key == null) - throw new ArgumentNullException("key"); - - return SendExpectInt(Commands.Ttl, key.ToUtf8Bytes()); - } - - public void Save() - { - SendExpectSuccess(Commands.Save); - } - - public void SaveAsync() - { - BgSave(); - } - - public void BgSave() - { - SendExpectSuccess(Commands.BgSave); - } - - public void Shutdown() - { - SendExpectSuccess(Commands.Shutdown); - } - - public void BgRewriteAof() - { - SendExpectSuccess(Commands.BgRewriteAof); - } - - public void Quit() - { - SendCommand(Commands.Quit); - } - - public void FlushDb() - { - SendExpectSuccess(Commands.FlushDb); - } - - public void FlushAll() - { - SendExpectSuccess(Commands.FlushAll); - } - - //Old behaviour pre 1.3.7 - public byte[] KeysV126(string pattern) - { - if (pattern == null) - throw new ArgumentNullException("pattern"); - - return 
SendExpectData(Commands.Keys, pattern.ToUtf8Bytes()); - } - - public byte[][] Keys(string pattern) - { - if (pattern == null) - throw new ArgumentNullException("pattern"); - - return SendExpectMultiData(Commands.Keys, pattern.ToUtf8Bytes()); - } - - public byte[][] MGet(params string[] keys) - { - if (keys == null) - throw new ArgumentNullException("keys"); - if (keys.Length == 0) - throw new ArgumentException("keys"); - - var cmdWithArgs = MergeCommandWithArgs(Commands.MGet, keys); - - return SendExpectMultiData(cmdWithArgs); - } - - internal void Multi() - { - if (!SendCommand(Commands.Multi)) - throw CreateConnectionError(); - - ExpectOk(); - } - - /// - /// Requires custom result parsing - /// - /// Number of results - internal int Exec() - { - if (!SendCommand(Commands.Exec)) - throw CreateConnectionError(); - - return this.ReadMultiDataResultCount(); - } - - internal void Discard() - { - SendExpectSuccess(Commands.Discard); - } - - #endregion - - - #region Set Operations - - public byte[][] SMembers(string setId) - { - return SendExpectMultiData(Commands.SMembers, setId.ToUtf8Bytes()); - } - - public int SAdd(string setId, byte[] value) - { - AssertSetIdAndValue(setId, value); - - return SendExpectInt(Commands.SAdd, setId.ToUtf8Bytes(), value); - } - - public void SRem(string setId, byte[] value) - { - AssertSetIdAndValue(setId, value); - - SendExpectSuccess(Commands.SRem, setId.ToUtf8Bytes(), value); - } - - public byte[] SPop(string setId) - { - if (setId == null) - throw new ArgumentNullException("setId"); - - return SendExpectData(Commands.SPop, setId.ToUtf8Bytes()); - } - - public void SMove(string fromSetId, string toSetId, byte[] value) - { - if (fromSetId == null) - throw new ArgumentNullException("fromSetId"); - if (toSetId == null) - throw new ArgumentNullException("toSetId"); - - SendExpectSuccess(Commands.SMove, fromSetId.ToUtf8Bytes(), toSetId.ToUtf8Bytes(), value); - } - - public int SCard(string setId) - { - if (setId == null) - throw new ArgumentNullException("setId"); - - return SendExpectInt(Commands.SCard, setId.ToUtf8Bytes()); - } - - public int SIsMember(string setId, byte[] value) - { - if (setId == null) - throw new ArgumentNullException("setId"); - - return SendExpectInt(Commands.SIsMember, setId.ToUtf8Bytes(), value); - } - - public byte[][] SInter(params string[] setIds) - { - var cmdWithArgs = MergeCommandWithArgs(Commands.SInter, setIds); - return SendExpectMultiData(cmdWithArgs); - } - - public void SInterStore(string intoSetId, params string[] setIds) - { - var setIdsList = new List(setIds); - setIdsList.Insert(0, intoSetId); - - var cmdWithArgs = MergeCommandWithArgs(Commands.SInterStore, setIdsList.ToArray()); - SendExpectSuccess(cmdWithArgs); - } - - public byte[][] SUnion(params string[] setIds) - { - var cmdWithArgs = MergeCommandWithArgs(Commands.SUnion, setIds); - return SendExpectMultiData(cmdWithArgs); - } - - public void SUnionStore(string intoSetId, params string[] setIds) - { - var setIdsList = new List(setIds); - setIdsList.Insert(0, intoSetId); - - var cmdWithArgs = MergeCommandWithArgs(Commands.SUnionStore, setIdsList.ToArray()); - SendExpectSuccess(cmdWithArgs); - } - - public byte[][] SDiff(string fromSetId, params string[] withSetIds) - { - var setIdsList = new List(withSetIds); - setIdsList.Insert(0, fromSetId); - - var cmdWithArgs = MergeCommandWithArgs(Commands.SDiff, setIdsList.ToArray()); - return SendExpectMultiData(cmdWithArgs); - } - - public void SDiffStore(string intoSetId, string fromSetId, params string[] withSetIds) - { - 
var setIdsList = new List(withSetIds); - setIdsList.Insert(0, fromSetId); - setIdsList.Insert(0, intoSetId); - - var cmdWithArgs = MergeCommandWithArgs(Commands.SDiffStore, setIdsList.ToArray()); - SendExpectSuccess(cmdWithArgs); - } - - public byte[] SRandMember(string setId) - { - return SendExpectData(Commands.SRandMember, setId.ToUtf8Bytes()); - } - - #endregion - - - #region List Operations - - public byte[][] LRange(string listId, int startingFrom, int endingAt) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - return SendExpectMultiData(Commands.LRange, listId.ToUtf8Bytes(), startingFrom.ToUtf8Bytes(), endingAt.ToUtf8Bytes()); - } - - public byte[][] Sort(string listOrSetId, SortOptions sortOptions) - { - var cmdWithArgs = new List - { - Commands.Sort, listOrSetId.ToUtf8Bytes() - }; - - if (sortOptions.SortPattern != null) - { - cmdWithArgs.Add(Commands.By); - cmdWithArgs.Add(sortOptions.SortPattern.ToUtf8Bytes()); - } - - if (sortOptions.Skip.HasValue || sortOptions.Take.HasValue) - { - cmdWithArgs.Add(Commands.Limit); - cmdWithArgs.Add(sortOptions.Skip.GetValueOrDefault(0).ToUtf8Bytes()); - cmdWithArgs.Add(sortOptions.Take.GetValueOrDefault(0).ToUtf8Bytes()); - } - - if (sortOptions.GetPattern != null) - { - cmdWithArgs.Add(Commands.Get); - cmdWithArgs.Add(sortOptions.GetPattern.ToUtf8Bytes()); - } - - if (sortOptions.SortDesc) - { - cmdWithArgs.Add(Commands.Desc); - } - - if (sortOptions.SortAlpha) - { - cmdWithArgs.Add(Commands.Alpha); - } - - if (sortOptions.StoreAtKey != null) - { - cmdWithArgs.Add(Commands.Store); - cmdWithArgs.Add(sortOptions.StoreAtKey.ToUtf8Bytes()); - } - - return SendExpectMultiData(cmdWithArgs.ToArray()); - } - - public int RPush(string listId, byte[] value) - { - AssertListIdAndValue(listId, value); - - if (IsPreVersion1_26) - { - SendExpectSuccess(Commands.RPush, listId.ToUtf8Bytes(), value); - return -1; - } - - return SendExpectInt(Commands.RPush, listId.ToUtf8Bytes(), value); - } - - public int LPush(string listId, byte[] value) - { - AssertListIdAndValue(listId, value); - - if (IsPreVersion1_26) - { - SendExpectSuccess(Commands.LPush, listId.ToUtf8Bytes(), value); - return -1; - } - - return SendExpectInt(Commands.LPush, listId.ToUtf8Bytes(), value); - } - - public void LTrim(string listId, int keepStartingFrom, int keepEndingAt) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - SendExpectSuccess(Commands.LTrim, listId.ToUtf8Bytes(), keepStartingFrom.ToUtf8Bytes(), keepEndingAt.ToUtf8Bytes()); - } - - public int LRem(string listId, int removeNoOfMatches, byte[] value) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - return SendExpectInt(Commands.LRem, listId.ToUtf8Bytes(), removeNoOfMatches.ToUtf8Bytes(), value); - } - - public int LLen(string listId) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - return SendExpectInt(Commands.LLen, listId.ToUtf8Bytes()); - } - - public byte[] LIndex(string listId, int listIndex) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - return SendExpectData(Commands.LIndex, listId.ToUtf8Bytes(), listIndex.ToUtf8Bytes()); - } - - public void LSet(string listId, int listIndex, byte[] value) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - SendExpectSuccess(Commands.LSet, listId.ToUtf8Bytes(), listIndex.ToUtf8Bytes(), value); - } - - public byte[] LPop(string listId) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - return 
SendExpectData(Commands.LPop, listId.ToUtf8Bytes()); - } - - public byte[] RPop(string listId) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - return SendExpectData(Commands.RPop, listId.ToUtf8Bytes()); - } - - public byte[][] BLPop(string listId, int timeOutSecs) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - return SendExpectMultiData(Commands.BLPop, listId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); - } - - public byte[] BLPopValue(string listId, int timeOutSecs) - { - var blockingResponse = BLPop(listId, timeOutSecs); - return blockingResponse.Length == 0 - ? null - : blockingResponse[1]; - } - - public byte[][] BRPop(string listId, int timeOutSecs) - { - if (listId == null) - throw new ArgumentNullException("listId"); - - return SendExpectMultiData(Commands.BRPop, listId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); - } - - public byte[] BRPopValue(string listId, int timeOutSecs) - { - var blockingResponse = BRPop(listId, timeOutSecs); - return blockingResponse.Length == 0 - ? null - : blockingResponse[1]; - } - - public byte[] RPopLPush(string fromListId, string toListId) - { - if (fromListId == null) - throw new ArgumentNullException("fromListId"); - if (toListId == null) - throw new ArgumentNullException("toListId"); - - return SendExpectData(Commands.RPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes()); - } - - #endregion - - - #region Sorted Set Operations - - private static void AssertSetIdAndValue(string setId, byte[] value) - { - if (setId == null) - throw new ArgumentNullException("setId"); - if (value == null) - throw new ArgumentNullException("value"); - } - - public int ZAdd(string setId, double score, byte[] value) - { - AssertSetIdAndValue(setId, value); - - return SendExpectInt(Commands.ZAdd, setId.ToUtf8Bytes(), score.ToUtf8Bytes(), value); - } - - public int ZRem(string setId, byte[] value) - { - AssertSetIdAndValue(setId, value); - - return SendExpectInt(Commands.ZRem, setId.ToUtf8Bytes(), value); - } - - public double ZIncrBy(string setId, double incrBy, byte[] value) - { - AssertSetIdAndValue(setId, value); - - return SendExpectDouble(Commands.ZIncrBy, setId.ToUtf8Bytes(), incrBy.ToUtf8Bytes(), value); - } - - public int ZRank(string setId, byte[] value) - { - AssertSetIdAndValue(setId, value); - - return SendExpectInt(Commands.ZRank, setId.ToUtf8Bytes(), value); - } - - public int ZRevRank(string setId, byte[] value) - { - AssertSetIdAndValue(setId, value); - - return SendExpectInt(Commands.ZRevRank, setId.ToUtf8Bytes(), value); - } - - private byte[][] GetRange(byte[] commandBytes, string setId, int min, int max, bool withScores) - { - if (string.IsNullOrEmpty(setId)) - throw new ArgumentNullException("setId"); - - var cmdWithArgs = new List - { - commandBytes, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() - }; - - if (withScores) - { - cmdWithArgs.Add(Commands.WithScores); - } - - return SendExpectMultiData(cmdWithArgs.ToArray()); - } - - public byte[][] ZRange(string setId, int min, int max) - { - return SendExpectMultiData(Commands.ZRange, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); - } - - public byte[][] ZRangeWithScores(string setId, int min, int max) - { - return GetRange(Commands.ZRange, setId, min, max, true); - } - - public byte[][] ZRevRange(string setId, int min, int max) - { - return GetRange(Commands.ZRevRange, setId, min, max, false); - } - - public byte[][] ZRevRangeWithScores(string setId, int min, int max) - { - return GetRange(Commands.ZRevRange, setId, min, 
max, true); - } - - private byte[][] GetRangeByScore(byte[] commandBytes, - string setId, double min, double max, int? skip, int? take, bool withScores) - { - if (setId == null) - throw new ArgumentNullException("setId"); - - var cmdWithArgs = new List - { - commandBytes, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() - }; - - if (skip.HasValue || take.HasValue) - { - cmdWithArgs.Add(Commands.Limit); - cmdWithArgs.Add(skip.GetValueOrDefault(0).ToUtf8Bytes()); - cmdWithArgs.Add(take.GetValueOrDefault(0).ToUtf8Bytes()); - } - - if (withScores) - { - cmdWithArgs.Add(Commands.WithScores); - } - - return SendExpectMultiData(cmdWithArgs.ToArray()); - } - - public byte[][] ZRangeByScore(string setId, double min, double max, int? skip, int? take) - { - return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, false); - } - - public byte[][] ZRangeByScoreWithScores(string setId, double min, double max, int? skip, int? take) - { - return GetRangeByScore(Commands.ZRangeByScore, setId, min, max, skip, take, true); - } - - public byte[][] ZRevRangeByScore(string setId, double min, double max, int? skip, int? take) - { - return GetRangeByScore(Commands.ZRevRangeByScore, setId, min, max, skip, take, false); - } - - public byte[][] ZRevRangeByScoreWithScores(string setId, double min, double max, int? skip, int? take) - { - return GetRangeByScore(Commands.ZRevRangeByScore, setId, min, max, skip, take, true); - } - - public int ZRemRangeByRank(string setId, int min, int max) - { - if (setId == null) - throw new ArgumentNullException("setId"); - - return SendExpectInt(Commands.ZRemRangeByRank, setId.ToUtf8Bytes(), - min.ToUtf8Bytes(), max.ToUtf8Bytes()); - } - - public int ZRemRangeByScore(string setId, double fromScore, double toScore) - { - if (setId == null) - throw new ArgumentNullException("setId"); - - return SendExpectInt(Commands.ZRemRangeByScore, setId.ToUtf8Bytes(), - fromScore.ToUtf8Bytes(), toScore.ToUtf8Bytes()); - } - - public int ZCard(string setId) - { - if (setId == null) - throw new ArgumentNullException("setId"); - - return SendExpectInt(Commands.ZCard, setId.ToUtf8Bytes()); - } - - public double ZScore(string setId, byte[] value) - { - if (setId == null) - throw new ArgumentNullException("setId"); - - return SendExpectDouble(Commands.ZScore, setId.ToUtf8Bytes(), value); - } - - public int ZUnionStore(string intoSetId, params string[] setIds) - { - var setIdsList = new List(setIds); - setIdsList.Insert(0, setIds.Length.ToString()); - setIdsList.Insert(0, intoSetId); - - var cmdWithArgs = MergeCommandWithArgs(Commands.ZUnionStore, setIdsList.ToArray()); - return SendExpectInt(cmdWithArgs); - } - - public int ZInterStore(string intoSetId, params string[] setIds) - { - var setIdsList = new List(setIds); - setIdsList.Insert(0, setIds.Length.ToString()); - setIdsList.Insert(0, intoSetId); - - var cmdWithArgs = MergeCommandWithArgs(Commands.ZInterStore, setIdsList.ToArray()); - return SendExpectInt(cmdWithArgs); - } - - #endregion - - - #region Hash Operations - - private static void AssertHashIdAndKey(string hashId, byte[] key) - { - if (hashId == null) - throw new ArgumentNullException("hashId"); - if (key == null) - throw new ArgumentNullException("key"); - } - - public int HSet(string hashId, byte[] key, byte[] value) - { - AssertHashIdAndKey(hashId, key); - - return SendExpectInt(Commands.HSet, hashId.ToUtf8Bytes(), key, value); - } - - public int HSetNX(string hashId, byte[] key, byte[] value) - { - AssertHashIdAndKey(hashId, key); - - return 
SendExpectInt(Commands.HSetNx, hashId.ToUtf8Bytes(), key, value); - } - - public void HMSet(string hashId, byte[][] keys, byte[][] values) - { - if (hashId == null) - throw new ArgumentNullException("hashId"); - - var cmdArgs = MergeCommandWithKeysAndValues(Commands.HMSet, hashId.ToUtf8Bytes(), keys, values); - - SendExpectSuccess(cmdArgs); - } - - public int HIncrby(string hashId, byte[] key, int incrementBy) - { - AssertHashIdAndKey(hashId, key); - - return SendExpectInt(Commands.HIncrBy, hashId.ToUtf8Bytes(), key, incrementBy.ToString().ToUtf8Bytes()); - } - - public byte[] HGet(string hashId, byte[] key) - { - AssertHashIdAndKey(hashId, key); - - return SendExpectData(Commands.HGet, hashId.ToUtf8Bytes(), key); - } - - public byte[][] HMGet(string hashId, params byte[][] keys) - { - if (hashId == null) - throw new ArgumentNullException("hashId"); - if (keys.Length == 0) - throw new ArgumentNullException("keys"); - - var cmdArgs = MergeCommandWithArgs(Commands.HMGet, hashId.ToUtf8Bytes(), keys); - - return SendExpectMultiData(cmdArgs); - } - - public int HDel(string hashId, byte[] key) - { - AssertHashIdAndKey(hashId, key); - - return SendExpectInt(Commands.HDel, hashId.ToUtf8Bytes(), key); - } - - public int HExists(string hashId, byte[] key) - { - AssertHashIdAndKey(hashId, key); - - return SendExpectInt(Commands.HExists, hashId.ToUtf8Bytes(), key); - } - - public int HLen(string hashId) - { - if (string.IsNullOrEmpty(hashId)) - throw new ArgumentNullException("hashId"); - - return SendExpectInt(Commands.HLen, hashId.ToUtf8Bytes()); - } - - public byte[][] HKeys(string hashId) - { - if (hashId == null) - throw new ArgumentNullException("hashId"); - - return SendExpectMultiData(Commands.HKeys, hashId.ToUtf8Bytes()); - } - - public byte[][] HVals(string hashId) - { - if (hashId == null) - throw new ArgumentNullException("hashId"); - - return SendExpectMultiData(Commands.HVals, hashId.ToUtf8Bytes()); - } - - public byte[][] HGetAll(string hashId) - { - if (hashId == null) - throw new ArgumentNullException("hashId"); - - return SendExpectMultiData(Commands.HGetAll, hashId.ToUtf8Bytes()); - } - - public int Publish(string toChannel, byte[] message) - { - return SendExpectInt(Commands.Publish, toChannel.ToUtf8Bytes(), message); - } - - public byte[][] ReceiveMessages() - { - return ReadMultiData(); - } - - public byte[][] Subscribe(params string[] toChannels) - { - if (toChannels.Length == 0) - throw new ArgumentNullException("toChannels"); - - var cmdWithArgs = MergeCommandWithArgs(Commands.Subscribe, toChannels); - return SendExpectMultiData(cmdWithArgs); - } - - public byte[][] UnSubscribe(params string[] fromChannels) - { - var cmdWithArgs = MergeCommandWithArgs(Commands.UnSubscribe, fromChannels); - return SendExpectMultiData(cmdWithArgs); - } - - public byte[][] PSubscribe(params string[] toChannelsMatchingPatterns) - { - if (toChannelsMatchingPatterns.Length == 0) - throw new ArgumentNullException("toChannelsMatchingPatterns"); - - var cmdWithArgs = MergeCommandWithArgs(Commands.PSubscribe, toChannelsMatchingPatterns); - return SendExpectMultiData(cmdWithArgs); - } - - public byte[][] PUnSubscribe(params string[] fromChannelsMatchingPatterns) - { - var cmdWithArgs = MergeCommandWithArgs(Commands.PUnSubscribe, fromChannelsMatchingPatterns); - return SendExpectMultiData(cmdWithArgs); - } - - public RedisPipelineCommand CreatePipelineCommand() - { - AssertConnectedSocket(); - return new RedisPipelineCommand(this); - } - - #endregion - - internal bool IsDisposed { get; set; } - - public 
void Dispose() - { - Dispose(true); - GC.SuppressFinalize(this); - } - - ~RedisNativeClient() - { - Dispose(false); - } - - protected virtual void Dispose(bool disposing) - { - if (ClientManager != null) - { - ClientManager.DisposeClient(this); - return; - } - - if (disposing) - { - //dispose un managed resources - DisposeConnection(); - } - } - - internal void DisposeConnection() - { - if (IsDisposed) throw new ObjectDisposedException("Redis client already disposed"); - IsDisposed = true; - - if (socket == null) return; - - try - { - Quit(); - } - catch (Exception ex) - { - log.Error("Error when trying to Quit()", ex); - } - finally - { - SafeConnectionClose(); - } - } - - private void SafeConnectionClose() - { - try - { - // workaround for a .net bug: http://support.microsoft.com/kb/821625 - if (Bstream != null) - Bstream.Close(); - } - catch { } - try - { - if (socket != null) - socket.Close(); - } - catch { } - Bstream = null; - socket = null; - } - } -} diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisNativeClient_Utils.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisNativeClient_Utils.cs deleted file mode 100644 index e9dfde14..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisNativeClient_Utils.cs +++ /dev/null @@ -1,621 +0,0 @@ -// -// redis-sharp.cs: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Miguel de Icaza (miguel@gnome.org) -// -// Copyright 2010 Novell, Inc. -// -// Licensed under the same terms of Redis: new BSD license. -// - -using System; -using System.Diagnostics; -using System.IO; -using System.Net; -using System.Net.Sockets; -using System.Text; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public partial class RedisNativeClient - { - private void Connect() - { - socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp) { - SendTimeout = SendTimeout - }; - try - { - socket.Connect(Host, Port); - - if (!socket.Connected) - { - socket.Close(); - socket = null; - return; - } - Bstream = new BufferedStream(new NetworkStream(socket), 16 * 1024); - - if (Password != null) - SendExpectSuccess(Commands.Auth, Password.ToUtf8Bytes()); - - db = 0; - var ipEndpoint = socket.LocalEndPoint as IPEndPoint; - clientPort = ipEndpoint != null ? 
ipEndpoint.Port : -1; - lastCommand = null; - lastSocketException = null; - LastConnectedAtTimestamp = Stopwatch.GetTimestamp(); - - if (isPreVersion1_26 == null) - { - isPreVersion1_26 = this.ServerVersion.CompareTo("1.2.6") <= 0; - - //force version reload - log.DebugFormat("redis-server Version: {0}", isPreVersion1_26); - } - } - catch (SocketException ex) - { - HadExceptions = true; - var throwEx = new InvalidOperationException("could not connect to redis Instance at " + Host + ":" + Port, ex); - log.Error(throwEx.Message, ex); - throw throwEx; - } - } - - protected string ReadLine() - { - var sb = new StringBuilder(); - - int c; - while ((c = Bstream.ReadByte()) != -1) - { - if (c == '\r') - continue; - if (c == '\n') - break; - sb.Append((char)c); - } - return sb.ToString(); - } - - private bool AssertConnectedSocket() - { - if (LastConnectedAtTimestamp > 0) - { - var now = Stopwatch.GetTimestamp(); - var elapsedSecs = (now - LastConnectedAtTimestamp) / Stopwatch.Frequency; - - if (elapsedSecs > IdleTimeOutSecs && !socket.IsConnected()) - { - return Reconnect(); - } - LastConnectedAtTimestamp = now; - } - - if (socket == null) - { - var previousDb = db; - Connect(); - if (previousDb != DefaultDb) this.Db = previousDb; - } - - var isConnected = socket != null; - - return isConnected; - } - - private bool Reconnect() - { - var previousDb = db; - - SafeConnectionClose(); - Connect(); //sets db to 0 - - if (previousDb != DefaultDb) this.Db = previousDb; - - return socket != null; - } - - private bool HandleSocketException(SocketException ex) - { - HadExceptions = true; - log.Error("SocketException: ", ex); - - lastSocketException = ex; - - // timeout? - socket.Close(); - socket = null; - - return false; - } - - private RedisResponseException CreateResponseError(string error) - { - HadExceptions = true; - var throwEx = new RedisResponseException( - string.Format("{0}, sPort: {1}, LastCommand: {2}", - error, clientPort, lastCommand)); - log.Error(throwEx.Message); - throw throwEx; - } - - private Exception CreateConnectionError() - { - HadExceptions = true; - var throwEx = new Exception( - string.Format("Unable to Connect: sPort: {0}", - clientPort), lastSocketException); - log.Error(throwEx.Message); - throw throwEx; - } - - private static byte[] GetCmdBytes(char cmdPrefix, int noOfLines) - { - var strLines = noOfLines.ToString(); - var strLinesLength = strLines.Length; - - var cmdBytes = new byte[1 + strLinesLength + 2]; - cmdBytes[0] = (byte)cmdPrefix; - - for (var i = 0; i < strLinesLength; i++) - cmdBytes[i + 1] = (byte)strLines[i]; - - cmdBytes[1 + strLinesLength] = 0x0D; // \r - cmdBytes[2 + strLinesLength] = 0x0A; // \n - - return cmdBytes; - } - - /// - /// Command to set multuple binary safe arguments - /// - /// - /// - protected bool SendCommand(params byte[][] cmdWithBinaryArgs) - { - if (!AssertConnectedSocket()) return false; - - try - { - CmdLog(cmdWithBinaryArgs); - - //Total command lines count - WriteAllToSendBuffer(cmdWithBinaryArgs); - - FlushSendBuffer(); - } - catch (SocketException ex) - { - cmdBufferIndex = 0; - return HandleSocketException(ex); - } - return true; - } - - public void WriteAllToSendBuffer(params byte[][] cmdWithBinaryArgs) - { - WriteToSendBuffer(GetCmdBytes('*', cmdWithBinaryArgs.Length)); - - foreach (var safeBinaryValue in cmdWithBinaryArgs) - { - WriteToSendBuffer(GetCmdBytes('$', safeBinaryValue.Length)); - WriteToSendBuffer(safeBinaryValue); - WriteToSendBuffer(endData); - } - } - - byte[] cmdBuffer = new byte[32 * 1024]; - int cmdBufferIndex 
= 0; - - public void WriteToSendBuffer(byte[] cmdBytes) - { - if ((cmdBufferIndex + cmdBytes.Length) > cmdBuffer.Length) - { - const int breathingSpaceToReduceReallocations = (32 * 1024); - var newLargerBuffer = new byte[cmdBufferIndex + cmdBytes.Length + breathingSpaceToReduceReallocations]; - Buffer.BlockCopy(cmdBuffer, 0, newLargerBuffer, 0, cmdBuffer.Length); - cmdBuffer = newLargerBuffer; - } - - Buffer.BlockCopy(cmdBytes, 0, cmdBuffer, cmdBufferIndex, cmdBytes.Length); - cmdBufferIndex += cmdBytes.Length; - } - - public void FlushSendBuffer() - { - socket.Send(cmdBuffer, cmdBufferIndex, SocketFlags.None); - cmdBufferIndex = 0; - } - - private int SafeReadByte() - { - return Bstream.ReadByte(); - } - - private void SendExpectSuccess(params byte[][] cmdWithBinaryArgs) - { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); - - if (this.CurrentTransaction != null) - { - this.CurrentTransaction.CompleteVoidQueuedCommand(ExpectSuccess); - ExpectQueued(); - return; - } - ExpectSuccess(); - } - - private int SendExpectInt(params byte[][] cmdWithBinaryArgs) - { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); - - if (this.CurrentTransaction != null) - { - this.CurrentTransaction.CompleteIntQueuedCommand(ReadInt); - ExpectQueued(); - return default(int); - } - return ReadInt(); - } - - private byte[] SendExpectData(params byte[][] cmdWithBinaryArgs) - { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); - - if (this.CurrentTransaction != null) - { - this.CurrentTransaction.CompleteBytesQueuedCommand(ReadData); - ExpectQueued(); - return null; - } - return ReadData(); - } - - private string SendExpectString(params byte[][] cmdWithBinaryArgs) - { - var bytes = SendExpectData(cmdWithBinaryArgs); - return bytes.FromUtf8Bytes(); - } - private double SendExpectDouble(params byte[][] cmdWithBinaryArgs) - { - return parseDouble( SendExpectData(cmdWithBinaryArgs) ); - } - private double parseDouble(byte[] doubleBytes) - { - var doubleString = Encoding.UTF8.GetString(doubleBytes); - - double d; - double.TryParse(doubleString, out d); - - return d; - } - - private string SendExpectCode(params byte[][] cmdWithBinaryArgs) - { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); - - if (this.CurrentTransaction != null) - { - this.CurrentTransaction.CompleteBytesQueuedCommand(ReadData); - ExpectQueued(); - return null; - } - - return ExpectCode(); - } - - private byte[][] SendExpectMultiData(params byte[][] cmdWithBinaryArgs) - { - if (!SendCommand(cmdWithBinaryArgs)) - throw CreateConnectionError(); - - if (this.CurrentTransaction != null) - { - this.CurrentTransaction.CompleteMultiBytesQueuedCommand(ReadMultiData); - ExpectQueued(); - return new byte[0][]; - } - return ReadMultiData(); - } - - [Conditional("DEBUG")] - protected void Log(string fmt, params object[] args) - { - log.DebugFormat("{0}", string.Format(fmt, args).Trim()); - } - - [Conditional("DEBUG")] - protected void CmdLog(byte[][] args) - { - var sb = new StringBuilder(); - foreach (var arg in args) - { - if (sb.Length > 0) - sb.Append(" "); - - sb.Append(arg.FromUtf8Bytes()); - } - this.lastCommand = sb.ToString(); - if (this.lastCommand.Length > 100) - { - this.lastCommand = this.lastCommand.Substring(0, 100) + "..."; - } - - log.Debug("S: " + this.lastCommand); - } - - protected void ExpectSuccess() - { - int c = SafeReadByte(); - if (c == -1) - throw CreateResponseError("No more data"); - - var s = ReadLine(); - - Log((char)c + s); - - if (c == '-') - 
throw CreateResponseError(s.StartsWith("ERR") && s.Length >= 4 ? s.Substring(4) : s); - } - - private void ExpectWord(string word) - { - int c = SafeReadByte(); - if (c == -1) - throw CreateResponseError("No more data"); - - var s = ReadLine(); - - Log((char)c + s); - - if (c == '-') - throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); - - if (s != word) - throw CreateResponseError(string.Format("Expected '{0}' got '{1}'", word, s)); - } - - private string ExpectCode() - { - int c = SafeReadByte(); - if (c == -1) - throw CreateResponseError("No more data"); - - var s = ReadLine(); - - Log((char)c + s); - - if (c == '-') - throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); - - return s; - } - - protected void ExpectOk() - { - ExpectWord("OK"); - } - - protected void ExpectQueued() - { - ExpectWord("QUEUED"); - } - - public int ReadInt() - { - int c = SafeReadByte(); - if (c == -1) - throw CreateResponseError("No more data"); - - var s = ReadLine(); - - Log("R: " + s); - - if (c == '-') - throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); - - if (c == ':' || c == '$')//really strange why ZRANK needs the '$' here - { - int i; - if (int.TryParse(s, out i)) - return i; - } - throw CreateResponseError("Unknown reply on integer response: " + c + s); - } - - private byte[] ReadData() - { - string r = ReadLine(); - - Log("R: {0}", r); - if (r.Length == 0) - throw CreateResponseError("Zero length respose"); - - char c = r[0]; - if (c == '-') - throw CreateResponseError(r.StartsWith("-ERR") ? r.Substring(5) : r.Substring(1)); - - if (c == '$') - { - if (r == "$-1") - return null; - int count; - - if (Int32.TryParse(r.Substring(1), out count)) - { - var retbuf = new byte[count]; - - var offset = 0; - while (count > 0) - { - var readCount = Bstream.Read(retbuf, offset, count); - if (readCount <= 0) - throw CreateResponseError("Unexpected end of Stream"); - - offset += readCount; - count -= readCount; - } - - if (Bstream.ReadByte() != '\r' || Bstream.ReadByte() != '\n') - throw CreateResponseError("Invalid termination"); - - return retbuf; - } - throw CreateResponseError("Invalid length"); - } - - if (c == ':') - { - //match the return value - return r.Substring(1).ToUtf8Bytes(); - } - throw CreateResponseError("Unexpected reply: " + r); - } - - private byte[][] ReadMultiData() - { - int c = SafeReadByte(); - if (c == -1) - throw CreateResponseError("No more data"); - - var s = ReadLine(); - Log("R: " + s); - if (c == '-') - throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); - if (c == '*') - { - int count; - if (int.TryParse(s, out count)) - { - if (count == -1) - { - //redis is in an invalid state - return new byte[0][]; - } - - var result = new byte[count][]; - - for (int i = 0; i < count; i++) - result[i] = ReadData(); - - return result; - } - } - throw CreateResponseError("Unknown reply on multi-request: " + c + s); - } - - private int ReadMultiDataResultCount() - { - int c = SafeReadByte(); - if (c == -1) - throw CreateResponseError("No more data"); - - var s = ReadLine(); - Log("R: " + s); - if (c == '-') - throw CreateResponseError(s.StartsWith("ERR") ? 
s.Substring(4) : s); - if (c == '*') - { - int count; - if (int.TryParse(s, out count)) - { - return count; - } - } - throw CreateResponseError("Unknown reply on multi-request: " + c + s); - } - - private static void AssertListIdAndValue(string listId, byte[] value) - { - if (listId == null) - throw new ArgumentNullException("listId"); - if (value == null) - throw new ArgumentNullException("value"); - } - - private static byte[][] MergeCommandWithKeysAndValues(byte[] cmd, byte[][] keys, byte[][] values) - { - var firstParams = new[] { cmd }; - return MergeCommandWithKeysAndValues(firstParams, keys, values); - } - - private static byte[][] MergeCommandWithKeysAndValues(byte[] cmd, byte[] firstArg, byte[][] keys, byte[][] values) - { - var firstParams = new[] { cmd, firstArg }; - return MergeCommandWithKeysAndValues(firstParams, keys, values); - } - - private static byte[][] MergeCommandWithKeysAndValues(byte[][] firstParams, - byte[][] keys, byte[][] values) - { - if (keys == null || keys.Length == 0) - throw new ArgumentNullException("keys"); - if (values == null || values.Length == 0) - throw new ArgumentNullException("values"); - if (keys.Length != values.Length) - throw new ArgumentException("The number of values must be equal to the number of keys"); - - var keyValueStartIndex = (firstParams != null) ? firstParams.Length : 0; - - var keysAndValuesLength = keys.Length * 2 + keyValueStartIndex; - var keysAndValues = new byte[keysAndValuesLength][]; - - for (var i = 0; i < keyValueStartIndex; i++) - { - keysAndValues[i] = firstParams[i]; - } - - var j = 0; - for (var i = keyValueStartIndex; i < keysAndValuesLength; i += 2) - { - keysAndValues[i] = keys[j]; - keysAndValues[i + 1] = values[j]; - j++; - } - return keysAndValues; - } - - private static byte[][] MergeCommandWithArgs(byte[] cmd, params string[] args) - { - var mergedBytes = new byte[1 + args.Length][]; - mergedBytes[0] = cmd; - for (var i = 0; i < args.Length; i++) - { - mergedBytes[i + 1] = args[i].ToUtf8Bytes(); - } - return mergedBytes; - } - - private static byte[][] MergeCommandWithArgs(byte[] cmd, byte[] firstArg, params byte[][] args) - { - var mergedBytes = new byte[2 + args.Length][]; - mergedBytes[0] = cmd; - mergedBytes[1] = firstArg; - for (var i = 0; i < args.Length; i++) - { - mergedBytes[i + 2] = args[i]; - } - return mergedBytes; - } - - protected byte[][] ConvertToBytes(string[] keys) - { - var keyBytes = new byte[keys.Length][]; - for (var i = 0; i < keys.Length; i++) - { - var key = keys[i]; - keyBytes[i] = key != null ? 
key.ToUtf8Bytes() : new byte[0]; - } - return keyBytes; - } - - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisPipeline.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisPipeline.cs deleted file mode 100644 index 1169438e..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisPipeline.cs +++ /dev/null @@ -1,45 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; - -namespace ServiceStack.Redis -{ - public class RedisPipelineCommand - { - private readonly RedisNativeClient client; - private int cmdCount; - - public RedisPipelineCommand(RedisNativeClient client) - { - this.client = client; - } - - public void WriteCommand(params byte[][] cmdWithBinaryArgs) - { - client.WriteAllToSendBuffer(cmdWithBinaryArgs); - cmdCount++; - } - - public List ReadAllAsInts() - { - var results = new List(); - while (cmdCount-- > 0) - { - results.Add(client.ReadInt()); - } - - return results; - } - - public bool ReadAllAsIntsHaveSuccess() - { - var allResults = ReadAllAsInts(); - return allResults.All(x => x == RedisNativeClient.Success); - } - - public void Flush() - { - client.FlushSendBuffer(); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisResponseException.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisResponseException.cs deleted file mode 100644 index ef4ce166..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisResponseException.cs +++ /dev/null @@ -1,30 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. 
-// - -namespace ServiceStack.Redis -{ - public class RedisResponseException - : RedisException - { - public RedisResponseException(string message) - : base(message) - { - } - - public RedisResponseException(string message, string code) : base(message) - { - Code = code; - } - - public string Code { get; private set; } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisSubscription.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisSubscription.cs deleted file mode 100644 index 5564c2f4..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisSubscription.cs +++ /dev/null @@ -1,128 +0,0 @@ -using System; -using System.Collections.Generic; -using ServiceStack.Common.Extensions; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - public class RedisSubscription - : IRedisSubscription - { - private readonly RedisClient redisClient; - private List activeChannels; - public int SubscriptionCount { get; private set; } - - private static readonly byte[] SubscribeWord = "subscribe".ToUtf8Bytes(); - private static readonly byte[] UnSubscribeWord = "unsubscribe".ToUtf8Bytes(); - private static readonly byte[] MessageWord = "message".ToUtf8Bytes(); - - public RedisSubscription(RedisClient redisClient) - { - this.redisClient = redisClient; - - this.SubscriptionCount = 0; - this.activeChannels = new List(); - } - - public Action OnSubscribe { get; set; } - public Action OnMessage { get; set; } - public Action OnUnSubscribe { get; set; } - - public void SubscribeToChannels(params string[] channels) - { - var multiBytes = redisClient.Subscribe(channels); - ParseSubscriptionResults(multiBytes); - - while (this.SubscriptionCount > 0) - { - multiBytes = redisClient.ReceiveMessages(); - ParseSubscriptionResults(multiBytes); - } - } - - public void SubscribeToChannelsMatching(params string[] patterns) - { - var multiBytes = redisClient.Subscribe(patterns); - ParseSubscriptionResults(multiBytes); - - while (this.SubscriptionCount > 0) - { - multiBytes = redisClient.ReceiveMessages(); - ParseSubscriptionResults(multiBytes); - } - } - - private void ParseSubscriptionResults(byte[][] multiBytes) - { - for (var i = 0; i < multiBytes.Length; i += 3) - { - var messageType = multiBytes[i]; - var channel = multiBytes[i + 1].FromUtf8Bytes(); - - if (SubscribeWord.AreEqual(messageType)) - { - this.SubscriptionCount = int.Parse(multiBytes[i + 2].FromUtf8Bytes()); - - activeChannels.Add(channel); - - if (this.OnSubscribe != null) - { - this.OnSubscribe(channel); - } - } - else if (UnSubscribeWord.AreEqual(messageType)) - { - this.SubscriptionCount = int.Parse(multiBytes[i + 2].FromUtf8Bytes()); - - activeChannels.Remove(channel); - - if (this.OnUnSubscribe != null) - { - this.OnUnSubscribe(channel); - } - } - else if (MessageWord.AreEqual(messageType)) - { - var message = multiBytes[i + 2].FromUtf8Bytes(); - - if (this.OnMessage != null) - { - this.OnMessage(channel, message); - } - } - else - { - throw new RedisException( - "Invalid state. 
Expected [subscribe|unsubscribe|message] got: " + messageType); - } - } - } - - public void UnSubscribeFromAllChannels() - { - if (activeChannels.Count == 0) return; - - var multiBytes = redisClient.UnSubscribe(); - ParseSubscriptionResults(multiBytes); - - this.activeChannels = new List(); - } - - public void UnSubscribeFromChannels(params string[] channels) - { - var multiBytes = redisClient.UnSubscribe(channels); - ParseSubscriptionResults(multiBytes); - } - - public void UnSubscribeFromChannelsMatching(params string[] patterns) - { - var multiBytes = redisClient.UnSubscribe(patterns); - ParseSubscriptionResults(multiBytes); - } - - public void Dispose() - { - UnSubscribeFromAllChannels(); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisTransaction.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisTransaction.cs deleted file mode 100644 index 1e3936cd..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/RedisTransaction.cs +++ /dev/null @@ -1,324 +0,0 @@ -// -// https://github.com/mythz/ServiceStack.Redis -// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system -// -// Authors: -// Demis Bellot (demis.bellot@gmail.com) -// -// Copyright 2010 Liquidbit Ltd. -// -// Licensed under the same terms of Redis and ServiceStack: new BSD license. -// - -using System; -using System.Collections.Generic; - -namespace ServiceStack.Redis -{ - /// - /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). - /// - public class RedisTransaction - : IRedisTransaction, IRedisQueableTransaction - { - private readonly List queuedCommands = new List(); - - private readonly RedisClient redisClient; - private QueuedRedisOperation currentQueuedOperation; - - public RedisTransaction(RedisClient redisClient) - { - this.redisClient = redisClient; - - if (redisClient.CurrentTransaction != null) - throw new InvalidOperationException("An atomic command is already in use"); - - redisClient.Multi(); - redisClient.CurrentTransaction = this; - } - - private void BeginQueuedCommand(QueuedRedisOperation queuedRedisOperation) - { - if (currentQueuedOperation != null) - throw new InvalidOperationException("The previous queued operation has not been commited"); - - currentQueuedOperation = queuedRedisOperation; - } - - private void AssertCurrentOperation() - { - if (currentQueuedOperation == null) - throw new InvalidOperationException("No queued operation is currently set"); - } - - private void AddCurrentQueuedOperation() - { - this.queuedCommands.Add(currentQueuedOperation); - currentQueuedOperation = null; - } - - public void CompleteVoidQueuedCommand(Action voidReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.VoidReadCommand = voidReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteIntQueuedCommand(Func intReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.IntReadCommand = intReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteBytesQueuedCommand(Func bytesReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.BytesReadCommand = bytesReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteMultiBytesQueuedCommand(Func multiBytesReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.MultiBytesReadCommand = multiBytesReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteStringQueuedCommand(Func stringReadCommand) - { - 
AssertCurrentOperation(); - - currentQueuedOperation.StringReadCommand = stringReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteMultiStringQueuedCommand(Func> multiStringReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.MultiStringReadCommand = multiStringReadCommand; - AddCurrentQueuedOperation(); - } - - public void CompleteDoubleQueuedCommand(Func doubleReadCommand) - { - AssertCurrentOperation(); - - currentQueuedOperation.DoubleReadCommand = doubleReadCommand; - AddCurrentQueuedOperation(); - } - - - public void QueueCommand(Action command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Action command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Action command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessVoidCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessIntCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessBoolCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessDoubleCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessBytesCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessStringCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func command) - { - QueueCommand(command, null, null); - } - - public void 
QueueCommand(Func command, Action onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessMultiBytesCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void QueueCommand(Func> command) - { - QueueCommand(command, null, null); - } - - public void QueueCommand(Func> command, Action> onSuccessCallback) - { - QueueCommand(command, onSuccessCallback, null); - } - - public void QueueCommand(Func> command, Action> onSuccessCallback, Action onErrorCallback) - { - BeginQueuedCommand(new QueuedRedisOperation - { - OnSuccessMultiStringCallback = onSuccessCallback, - OnErrorCallback = onErrorCallback - }); - command(redisClient); - } - - - public void Commit() - { - try - { - var resultCount = redisClient.Exec(); - if (resultCount != queuedCommands.Count) - throw new InvalidOperationException(string.Format( - "Invalid results received from 'EXEC', expected '{0}' received '{1}'" - + "\nWarning: Transaction was committed", - queuedCommands.Count, resultCount)); - - foreach (var queuedCommand in queuedCommands) - { - queuedCommand.ProcessResult(); - } - } - finally - { - redisClient.CurrentTransaction = null; - redisClient.AddTypeIdsRegisteredDuringTransaction(); - } - } - - public void Rollback() - { - if (redisClient.CurrentTransaction == null) - throw new InvalidOperationException("There is no current transaction to Rollback"); - - redisClient.CurrentTransaction = null; - redisClient.ClearTypeIdsRegisteredDuringTransaction(); - redisClient.Discard(); - } - - public void Dispose() - { - if (redisClient.CurrentTransaction == null) return; - Rollback(); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/ServiceStack.Redis.csproj b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/ServiceStack.Redis.csproj deleted file mode 100644 index 00f75c33..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/ServiceStack.Redis.csproj +++ /dev/null @@ -1,167 +0,0 @@ - - - - Debug - AnyCPU - 9.0.30729 - 2.0 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540} - Library - Properties - ServiceStack.Redis - ServiceStack.Redis - v3.5 - 512 - - - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - - - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - - - true - bin\STATIC_ONLY NO_EXPRESSIONS\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - - - true - bin\MonoTouch\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - - - - False - ..\..\lib\ServiceStack.Client.dll - - - False - ..\..\lib\ServiceStack.Common.dll - - - False - ..\..\lib\ServiceStack.Interfaces.dll - - - False - ..\..\lib\ServiceStack.Messaging.dll - - - False - ..\..\lib\ServiceStack.Text.dll - - - - 3.5 - - - 3.5 - - - 3.5 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Code - - - - - - - \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/ServiceStack.Redis.csproj.user b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/ServiceStack.Redis.csproj.user deleted file mode 100644 index a1d742b1..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/ServiceStack.Redis.csproj.user +++ /dev/null @@ -1,5 +0,0 @@ - - - ProjectFiles - - \ No newline at end of file diff --git 
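The RedisTransaction removed above wraps Redis MULTI/EXEC: commands are queued together with an optional callback, Commit() sends EXEC and dispatches each reply to its callback, and Dispose() rolls back a transaction that was never committed. A minimal, illustrative usage sketch only — it assumes the IRedisClient.CreateTransaction() factory and the SetValue/IncrementValue commands from the main ServiceStack.Redis client, none of which appear in this diff, and exact signatures vary between versions:

    using System;
    using ServiceStack.Redis;

    class RedisTransactionSample
    {
        static void Main()
        {
            using (var redis = new RedisClient("localhost", 6379))
            using (var trans = redis.CreateTransaction())        // issues MULTI
            {
                // Commands are only queued here; callbacks fire after EXEC returns.
                trans.QueueCommand(r => r.SetValue("greeting", "hello"));
                trans.QueueCommand(r => r.IncrementValue("counter"),
                                   count => Console.WriteLine("counter = " + count));

                trans.Commit();                                   // sends EXEC, then runs the callbacks
            }                                                     // Dispose() discards if Commit() was never reached
        }
    }
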
a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/SortOptions.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/SortOptions.cs deleted file mode 100644 index 7e72f268..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/SortOptions.cs +++ /dev/null @@ -1,13 +0,0 @@ -namespace ServiceStack.Redis -{ - public class SortOptions - { - public string SortPattern { get; set; } - public int? Skip { get; set; } - public int? Take { get; set; } - public string GetPattern { get; set; } - public bool SortAlpha { get; set; } - public bool SortDesc { get; set; } - public string StoreAtKey { get; set; } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Support/IOrderedDictionary.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Support/IOrderedDictionary.cs deleted file mode 100644 index ba017161..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Support/IOrderedDictionary.cs +++ /dev/null @@ -1,58 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Collections.Specialized; - -namespace ServiceStack.Redis.Support -{ - /// - /// Represents a generic collection of key/value pairs that are ordered independently of the key and value. - /// - /// The type of the keys in the dictionary - /// The type of the values in the dictionary - public interface IOrderedDictionary - : IOrderedDictionary, IDictionary - { - /// - /// Adds an entry with the specified key and value into the IOrderedDictionary<TKey,TValue> collection with the lowest available index. - /// - /// The key of the entry to add. - /// The value of the entry to add. - /// The index of the newly added entry - /// - /// You can also use the property to add new elements by setting the value of a key that does not exist in the IOrderedDictionary<TKey,TValue> collection; however, if the specified key already exists in the IOrderedDictionary<TKey,TValue>, setting the property overwrites the old value. In contrast, the method does not modify existing elements. - /// An element with the same key already exists in the IOrderedDictionary<TKey,TValue> - /// The IOrderedDictionary<TKey,TValue> is read-only.
- /// -or-
- /// The IOrderedDictionary<TKey,TValue> has a fixed size.
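The remarks above distinguish Add, which appends a new entry and returns its index, from the key indexer, which appends on a missing key but overwrites an existing one. A short illustrative sketch, assuming the OrderedDictionary<TKey,TValue> implementation from ServiceStack.Redis.Support that is removed later in this diff (the sample class and variable names are invented):

    using System;
    using ServiceStack.Redis.Support;

    class OrderedDictionaryAddSample
    {
        static void Main()
        {
            var dict = new OrderedDictionary<string, int>();

            int first = dict.Add("one", 1);   // appends and returns the new entry's index (0)
            dict["two"] = 2;                  // missing key: the indexer appends a new entry
            dict["one"] = 10;                 // existing key: the indexer overwrites (Add would throw)

            Console.WriteLine(first);         // 0
            Console.WriteLine(dict[0]);       // 10 - insertion order is preserved
        }
    }
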
- new int Add(TKey key, TValue value); - - /// - /// Inserts a new entry into the IOrderedDictionary<TKey,TValue> collection with the specified key and value at the specified index. - /// - /// The zero-based index at which the element should be inserted. - /// The key of the entry to add. - /// The value of the entry to add. The value can be if the type of the values in the dictionary is a reference type. - /// is less than 0.
- /// -or-
- /// is greater than .
- /// An element with the same key already exists in the IOrderedDictionary<TKey,TValue>.
- /// The IOrderedDictionary<TKey,TValue> is read-only.
- /// -or-
- /// The IOrderedDictionary<TKey,TValue> has a fixed size.
- void Insert(int index, TKey key, TValue value); - - /// - /// Gets or sets the value at the specified index. - /// - /// The zero-based index of the value to get or set. - /// The value of the item at the specified index. - /// is less than 0.
- /// -or-
- /// is equal to or greater than .
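Insert and the positional indexer documented above address entries by position rather than by key. Another illustrative sketch against the same OrderedDictionary<TKey,TValue> implementation removed in this diff:

    using System;
    using ServiceStack.Redis.Support;

    class OrderedDictionaryInsertSample
    {
        static void Main()
        {
            var dict = new OrderedDictionary<string, string>();

            dict.Add("b", "second");
            dict.Insert(0, "a", "first");     // insert at position 0; later entries shift right

            Console.WriteLine(dict[0]);       // "first"  - value at position 0
            Console.WriteLine(dict["b"]);     // "second" - lookup by key still works
            dict.RemoveAt(0);                 // remove by position
        }
    }
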
- new TValue this[int index] - { - get; - set; - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Support/OrderedDictionary.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Support/OrderedDictionary.cs deleted file mode 100644 index 654f446b..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/Support/OrderedDictionary.cs +++ /dev/null @@ -1,642 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Generic; -using System.Collections.Specialized; - -namespace ServiceStack.Redis.Support -{ - /// - /// Represents a generic collection of key/value pairs that are ordered independently of the key and value. - /// - /// The type of the keys in the dictionary - /// The type of the values in the dictionary - public class OrderedDictionary : IOrderedDictionary - { - private const int DefaultInitialCapacity = 0; - - private static readonly string KeyTypeName = typeof(TKey).FullName; - private static readonly string ValueTypeName = typeof(TValue).FullName; - private static readonly bool ValueTypeIsReferenceType = !typeof(ValueType).IsAssignableFrom(typeof(TValue)); - - private Dictionary dictionary; - private List> list; - private readonly IEqualityComparer comparer; - private object syncRoot; - private readonly int initialCapacity; - - /// - /// Initializes a new instance of the OrderedDictionary<TKey,TValue> class. - /// - public OrderedDictionary() - : this(DefaultInitialCapacity, null) - { - } - - /// - /// Initializes a new instance of the OrderedDictionary<TKey,TValue> class using the specified initial capacity. - /// - /// The initial number of elements that the OrderedDictionary<TKey,TValue> can contain. - /// is less than 0 - public OrderedDictionary(int capacity) - : this(capacity, null) - { - } - - /// - /// Initializes a new instance of the OrderedDictionary<TKey,TValue> class using the specified comparer. - /// - /// The IEqualityComparer<TKey> to use when comparing keys, or to use the default EqualityComparer<TKey> for the type of the key. - public OrderedDictionary(IEqualityComparer comparer) - : this(DefaultInitialCapacity, comparer) - { - } - - /// - /// Initializes a new instance of the OrderedDictionary<TKey,TValue> class using the specified initial capacity and comparer. - /// - /// The initial number of elements that the OrderedDictionary<TKey,TValue> collection can contain. - /// The IEqualityComparer<TKey> to use when comparing keys, or to use the default EqualityComparer<TKey> for the type of the key. - /// is less than 0 - public OrderedDictionary(int capacity, IEqualityComparer comparer) - { - if (0 > capacity) - throw new ArgumentOutOfRangeException("capacity", "'capacity' must be non-negative"); - - initialCapacity = capacity; - this.comparer = comparer; - } - - /// - /// Converts the object passed as a key to the key type of the dictionary - /// - /// The key object to check - /// The key object, cast as the key type of the dictionary - /// is . - /// The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . 
- private static TKey ConvertToKeyType(object keyObject) - { - if (null == keyObject) - { - throw new ArgumentNullException("key"); - } - else - { - if (keyObject is TKey) - return (TKey)keyObject; - } - throw new ArgumentException("'key' must be of type " + KeyTypeName, "key"); - } - - /// - /// Converts the object passed as a value to the value type of the dictionary - /// - /// The object to convert to the value type of the dictionary - /// The value object, converted to the value type of the dictionary - /// is , and the value type of the OrderedDictionary<TKey,TValue> is a value type. - /// The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . - private static TValue ConvertToValueType(object value) - { - if (null == value) - { - if (ValueTypeIsReferenceType) - return default(TValue); - else - throw new ArgumentNullException("value"); - } - else - { - if (value is TValue) - return (TValue)value; - } - throw new ArgumentException("'value' must be of type " + ValueTypeName, "value"); - } - - /// - /// Gets the dictionary object that stores the keys and values - /// - /// The dictionary object that stores the keys and values for the OrderedDictionary<TKey,TValue> - /// Accessing this property will create the dictionary object if necessary - private Dictionary Dictionary - { - get - { - if (null == dictionary) - { - dictionary = new Dictionary(initialCapacity, comparer); - } - return dictionary; - } - } - - /// - /// Gets the list object that stores the key/value pairs. - /// - /// The list object that stores the key/value pairs for the OrderedDictionary<TKey,TValue> - /// Accessing this property will create the list object if necessary. - private List> List - { - get - { - if (null == list) - { - list = new List>(initialCapacity); - } - return list; - } - } - - IDictionaryEnumerator IOrderedDictionary.GetEnumerator() - { - return Dictionary.GetEnumerator(); - } - - IDictionaryEnumerator IDictionary.GetEnumerator() - { - return Dictionary.GetEnumerator(); - } - - IEnumerator IEnumerable.GetEnumerator() - { - return List.GetEnumerator(); - } - - IEnumerator> IEnumerable>.GetEnumerator() - { - return List.GetEnumerator(); - } - - /// - /// Inserts a new entry into the OrderedDictionary<TKey,TValue> collection with the specified key and value at the specified index. - /// - /// The zero-based index at which the element should be inserted. - /// The key of the entry to add. - /// The value of the entry to add. The value can be if the type of the values in the dictionary is a reference type. - /// is less than 0.
- /// -or-
- /// is greater than .
- /// is . - /// An element with the same key already exists in the OrderedDictionary<TKey,TValue>. - public void Insert(int index, TKey key, TValue value) - { - if (index > Count || index < 0) - throw new ArgumentOutOfRangeException("index"); - - Dictionary.Add(key, value); - List.Insert(index, new KeyValuePair(key, value)); - } - - /// - /// Inserts a new entry into the OrderedDictionary<TKey,TValue> collection with the specified key and value at the specified index. - /// - /// The zero-based index at which the element should be inserted. - /// The key of the entry to add. - /// The value of the entry to add. The value can be if the type of the values in the dictionary is a reference type. - /// is less than 0.
- /// -or-
- /// is greater than .
- /// is .
- /// -or-
- /// is , and the value type of the OrderedDictionary<TKey,TValue> is a value type.
- /// The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
- /// -or-
- /// The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
- /// -or-
- /// An element with the same key already exists in the OrderedDictionary<TKey,TValue>.
- void IOrderedDictionary.Insert(int index, object key, object value) - { - Insert(index, ConvertToKeyType(key), ConvertToValueType(value)); - } - - /// - /// Removes the entry at the specified index from the OrderedDictionary<TKey,TValue> collection. - /// - /// The zero-based index of the entry to remove. - /// is less than 0.
- /// -or-
- /// index is equal to or greater than .
- public void RemoveAt(int index) - { - if (index >= Count || index < 0) - throw new ArgumentOutOfRangeException("index", "'index' must be non-negative and less than the size of the collection"); - - TKey key = List[index].Key; - - List.RemoveAt(index); - Dictionary.Remove(key); - } - - /// - /// Gets or sets the value at the specified index. - /// - /// The zero-based index of the value to get or set. - /// The value of the item at the specified index. - /// is less than 0.
- /// -or-
- /// index is equal to or greater than .
- public TValue this[int index] - { - get - { - return List[index].Value; - } - - set - { - if (index >= Count || index < 0) - throw new ArgumentOutOfRangeException("index", "'index' must be non-negative and less than the size of the collection"); - - TKey key = List[index].Key; - - List[index] = new KeyValuePair(key, value); - Dictionary[key] = value; - } - } - - /// - /// Gets or sets the value at the specified index. - /// - /// The zero-based index of the value to get or set. - /// The value of the item at the specified index. - /// is less than 0.
- /// -or-
- /// index is equal to or greater than .
- /// is a null reference, and the value type of the OrderedDictionary<TKey,TValue> is a value type. - /// The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . - object IOrderedDictionary.this[int index] - { - get - { - return this[index]; - } - - set - { - this[index] = ConvertToValueType(value); - } - } - - /// - /// Adds an entry with the specified key and value into the OrderedDictionary<TKey,TValue> collection with the lowest available index. - /// - /// The key of the entry to add. - /// The value of the entry to add. This value can be . - /// A key cannot be , but a value can be. - /// You can also use the property to add new elements by setting the value of a key that does not exist in the OrderedDictionary<TKey,TValue> collection; however, if the specified key already exists in the OrderedDictionary<TKey,TValue>, setting the property overwrites the old value. In contrast, the method does not modify existing elements. - /// is - /// An element with the same key already exists in the OrderedDictionary<TKey,TValue> - void IDictionary.Add(TKey key, TValue value) - { - Add(key, value); - } - - /// - /// Adds an entry with the specified key and value into the OrderedDictionary<TKey,TValue> collection with the lowest available index. - /// - /// The key of the entry to add. - /// The value of the entry to add. This value can be . - /// The index of the newly added entry - /// A key cannot be , but a value can be. - /// You can also use the property to add new elements by setting the value of a key that does not exist in the OrderedDictionary<TKey,TValue> collection; however, if the specified key already exists in the OrderedDictionary<TKey,TValue>, setting the property overwrites the old value. In contrast, the method does not modify existing elements. - /// is - /// An element with the same key already exists in the OrderedDictionary<TKey,TValue> - public int Add(TKey key, TValue value) - { - Dictionary.Add(key, value); - List.Add(new KeyValuePair(key, value)); - return Count - 1; - } - - /// - /// Adds an entry with the specified key and value into the OrderedDictionary<TKey,TValue> collection with the lowest available index. - /// - /// The key of the entry to add. - /// The value of the entry to add. This value can be . - /// is .
- /// -or-
- /// is , and the value type of the OrderedDictionary<TKey,TValue> is a value type.
- /// The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
- /// -or-
- /// The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
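The explicit IDictionary.Add implementation that follows funnels boxed keys and values through ConvertToKeyType/ConvertToValueType, so a caller using the non-generic interface gets an ArgumentException when the runtime types do not match TKey/TValue. An illustrative sketch, again assuming the OrderedDictionary<TKey,TValue> removed in this diff:

    using System;
    using System.Collections;
    using ServiceStack.Redis.Support;

    class OrderedDictionaryNonGenericSample
    {
        static void Main()
        {
            IDictionary dict = new OrderedDictionary<string, int>();

            dict.Add("answer", 42);            // boxed key/value are converted to TKey/TValue
            Console.WriteLine(dict["answer"]); // 42

            try
            {
                dict.Add(123, 7);              // key is an int, not a string
            }
            catch (ArgumentException)
            {
                Console.WriteLine("key type mismatch rejected");
            }
        }
    }
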
- void IDictionary.Add(object key, object value) - { - Add(ConvertToKeyType(key), ConvertToValueType(value)); - } - - /// - /// Removes all elements from the OrderedDictionary<TKey,TValue> collection. - /// - /// The capacity is not changed as a result of calling this method. - public void Clear() - { - Dictionary.Clear(); - List.Clear(); - } - - /// - /// Determines whether the OrderedDictionary<TKey,TValue> collection contains a specific key. - /// - /// The key to locate in the OrderedDictionary<TKey,TValue> collection. - /// if the OrderedDictionary<TKey,TValue> collection contains an element with the specified key; otherwise, . - /// is - public bool ContainsKey(TKey key) - { - return Dictionary.ContainsKey(key); - } - /// - /// Determines whether the OrderedDictionary<TKey,TValue> collection contains a specific key. - /// - /// The key to locate in the OrderedDictionary<TKey,TValue> collection. - /// if the OrderedDictionary<TKey,TValue> collection contains an element with the specified key; otherwise, . - /// is - /// The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . - bool IDictionary.Contains(object key) - { - return ContainsKey(ConvertToKeyType(key)); - } - - /// - /// Gets a value indicating whether the OrderedDictionary<TKey,TValue> has a fixed size. - /// - /// if the OrderedDictionary<TKey,TValue> has a fixed size; otherwise, . The default is . - bool IDictionary.IsFixedSize - { - get - { - return false; - } - } - - /// - /// Gets a value indicating whether the OrderedDictionary<TKey,TValue> collection is read-only. - /// - /// if the OrderedDictionary<TKey,TValue> is read-only; otherwise, . The default is . - /// - /// A collection that is read-only does not allow the addition, removal, or modification of elements after the collection is created. - /// A collection that is read-only is simply a collection with a wrapper that prevents modification of the collection; therefore, if changes are made to the underlying collection, the read-only collection reflects those changes. - /// - public bool IsReadOnly - { - get - { - return false; - } - } - - /// - /// Gets an object containing the keys in the OrderedDictionary<TKey,TValue>. - /// - /// An object containing the keys in the OrderedDictionary<TKey,TValue>. - /// The returned object is not a static copy; instead, the collection refers back to the keys in the original OrderedDictionary<TKey,TValue>. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the key collection. - ICollection IDictionary.Keys - { - get - { - return (ICollection)Keys; - } - } - - /// - /// Returns the zero-based index of the specified key in the OrderedDictionary<TKey,TValue> - /// - /// The key to locate in the OrderedDictionary<TKey,TValue> - /// The zero-based index of , if is found in the OrderedDictionary<TKey,TValue>; otherwise, -1 - /// This method performs a linear search; therefore it has a cost of O(n) at worst. - public int IndexOfKey(TKey key) - { - if (null == key) - throw new ArgumentNullException("key"); - - for (int index = 0; index < List.Count; index++) - { - KeyValuePair entry = List[index]; - TKey next = entry.Key; - if (null != comparer) - { - if (comparer.Equals(next, key)) - { - return index; - } - } - else if (next.Equals(key)) - { - return index; - } - } - - return -1; - } - - /// - /// Removes the entry with the specified key from the OrderedDictionary<TKey,TValue> collection. 
- /// - /// The key of the entry to remove - /// if the key was found and the corresponding element was removed; otherwise, - public bool Remove(TKey key) - { - if (null == key) - throw new ArgumentNullException("key"); - - int index = IndexOfKey(key); - if (index >= 0) - { - if (Dictionary.Remove(key)) - { - List.RemoveAt(index); - return true; - } - } - return false; - } - - /// - /// Removes the entry with the specified key from the OrderedDictionary<TKey,TValue> collection. - /// - /// The key of the entry to remove - void IDictionary.Remove(object key) - { - Remove(ConvertToKeyType(key)); - } - - /// - /// Gets an object containing the values in the OrderedDictionary<TKey,TValue> collection. - /// - /// An object containing the values in the OrderedDictionary<TKey,TValue> collection. - /// The returned object is not a static copy; instead, the refers back to the values in the original OrderedDictionary<TKey,TValue> collection. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the . - ICollection IDictionary.Values - { - get - { - return (ICollection)Values; - } - } - - /// - /// Gets or sets the value with the specified key. - /// - /// The key of the value to get or set. - /// The value associated with the specified key. If the specified key is not found, attempting to get it returns , and attempting to set it creates a new element using the specified key. - public TValue this[TKey key] - { - get - { - return Dictionary[key]; - } - set - { - if (Dictionary.ContainsKey(key)) - { - Dictionary[key] = value; - List[IndexOfKey(key)] = new KeyValuePair(key, value); - } - else - { - Add(key, value); - } - } - } - - /// - /// Gets or sets the value with the specified key. - /// - /// The key of the value to get or set. - /// The value associated with the specified key. If the specified key is not found, attempting to get it returns , and attempting to set it creates a new element using the specified key. - object IDictionary.this[object key] - { - get - { - return this[ConvertToKeyType(key)]; - } - set - { - this[ConvertToKeyType(key)] = ConvertToValueType(value); - } - } - - /// - /// Copies the elements of the OrderedDictionary<TKey,TValue> elements to a one-dimensional Array object at the specified index. - /// - /// The one-dimensional object that is the destination of the objects copied from the OrderedDictionary<TKey,TValue>. The must have zero-based indexing. - /// The zero-based index in at which copying begins. - /// The method preserves the order of the elements in the OrderedDictionary<TKey,TValue> - void ICollection.CopyTo(Array array, int index) - { - ((ICollection)List).CopyTo(array, index); - } - - /// - /// Gets the number of key/values pairs contained in the OrderedDictionary<TKey,TValue> collection. - /// - /// The number of key/value pairs contained in the OrderedDictionary<TKey,TValue> collection. - public int Count - { - get - { - return List.Count; - } - } - - /// - /// Gets a value indicating whether access to the OrderedDictionary<TKey,TValue> object is synchronized (thread-safe). - /// - /// This method always returns false. - bool ICollection.IsSynchronized - { - get - { - return false; - } - } - - /// - /// Gets an object that can be used to synchronize access to the OrderedDictionary<TKey,TValue> object. - /// - /// An object that can be used to synchronize access to the OrderedDictionary<TKey,TValue> object. 
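SyncRoot, as described above, is created lazily and returns the same object on every call, so it can serve as the lock object when one dictionary instance is shared across threads. A small illustrative sketch (the Increment helper and sample keys are invented for the example):

    using System;
    using System.Collections;
    using ServiceStack.Redis.Support;

    class OrderedDictionarySyncRootSample
    {
        static readonly OrderedDictionary<string, int> Counters = new OrderedDictionary<string, int>();

        static void Increment(string key)
        {
            // All writers serialize access to the shared dictionary through SyncRoot.
            lock (((ICollection)Counters).SyncRoot)
            {
                int current;
                Counters[key] = Counters.TryGetValue(key, out current) ? current + 1 : 1;
            }
        }

        static void Main()
        {
            Increment("requests");
            Increment("requests");
            Console.WriteLine(Counters["requests"]);   // 2
        }
    }
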
- object ICollection.SyncRoot - { - get - { - if (this.syncRoot == null) - { - System.Threading.Interlocked.CompareExchange(ref this.syncRoot, new object(), null); - } - return this.syncRoot; - } - } - - /// - /// Gets an ICollection<TKey> object containing the keys in the OrderedDictionary<TKey,TValue>. - /// - /// An ICollection<TKey> object containing the keys in the OrderedDictionary<TKey,TValue>. - /// The returned ICollection<TKey> object is not a static copy; instead, the collection refers back to the keys in the original OrderedDictionary<TKey,TValue>. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the key collection. - public ICollection Keys - { - get - { - return Dictionary.Keys; - } - } - - /// - /// Gets the value associated with the specified key. - /// - /// The key of the value to get. - /// When this method returns, contains the value associated with the specified key, if the key is found; otherwise, the default value for the type of . This parameter can be passed uninitialized. - /// if the OrderedDictionary<TKey,TValue> contains an element with the specified key; otherwise, . - public bool TryGetValue(TKey key, out TValue value) - { - return Dictionary.TryGetValue(key, out value); - } - - /// - /// Gets an ICollection<TValue> object containing the values in the OrderedDictionary<TKey,TValue>. - /// - /// An ICollection<TValue> object containing the values in the OrderedDictionary<TKey,TValue>. - /// The returned ICollection<TKey> object is not a static copy; instead, the collection refers back to the values in the original OrderedDictionary<TKey,TValue>. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the value collection. - public ICollection Values - { - get - { - return Dictionary.Values; - } - } - - /// - /// Adds the specified value to the OrderedDictionary<TKey,TValue> with the specified key. - /// - /// The KeyValuePair<TKey,TValue> structure representing the key and value to add to the OrderedDictionary<TKey,TValue>. - void ICollection>.Add(KeyValuePair item) - { - Add(item.Key, item.Value); - } - - /// - /// Determines whether the OrderedDictionary<TKey,TValue> contains a specific key and value. - /// - /// The KeyValuePair<TKey,TValue> structure to locate in the OrderedDictionary<TKey,TValue>. - /// if is found in the OrderedDictionary<TKey,TValue>; otherwise, . - bool ICollection>.Contains(KeyValuePair item) - { - return ((ICollection>)Dictionary).Contains(item); - } - - /// - /// Copies the elements of the OrderedDictionary<TKey,TValue> to an array of type , starting at the specified index. - /// - /// The one-dimensional array of type KeyValuePair<TKey,TValue> that is the destination of the KeyValuePair<TKey,TValue> elements copied from the OrderedDictionary<TKey,TValue>. The array must have zero-based indexing. - /// The zero-based index in at which copying begins. - void ICollection>.CopyTo(KeyValuePair[] array, int arrayIndex) - { - ((ICollection>)Dictionary).CopyTo(array, arrayIndex); - } - - /// - /// Removes a key and value from the dictionary. - /// - /// The KeyValuePair<TKey,TValue> structure representing the key and value to remove from the OrderedDictionary<TKey,TValue>. - /// if the key and value represented by is successfully found and removed; otherwise, . This method returns if is not found in the OrderedDictionary<TKey,TValue>. 
- bool ICollection>.Remove(KeyValuePair item) - { - return Remove(item.Key); - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/UtilExtensions.cs b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/UtilExtensions.cs deleted file mode 100644 index 33ddf1c1..00000000 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis/UtilExtensions.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using ServiceStack.Text; - -namespace ServiceStack.Redis -{ - internal static class UtilExtensions - { - public static List ConvertEachTo(this IEnumerable list) - { - var to = new List(); - foreach (var item in list) - { - to.Add(JsonSerializer.DeserializeFromString(item)); - } - return to; - } - } -} diff --git a/tests/ServiceStack.Redis.Tests/Shared/BuiltInsFactory.cs b/tests/ServiceStack.Redis.Tests/Shared/BuiltInsFactory.cs new file mode 100644 index 00000000..1c6b1859 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/BuiltInsFactory.cs @@ -0,0 +1,55 @@ +using System; +using System.Collections.Generic; +using NUnit.Framework; + +namespace ServiceStack.Common.Tests.Models +{ + public class BuiltInsFactory + : ModelFactoryBase + { + readonly string[] StringValues = new[] { + "one", "two", "three", "four", + "five", "six", "seven" + }; + + public override void AssertIsEqual(string actual, string expected) + { + Assert.That(actual, Is.EqualTo(expected)); + } + + public override string CreateInstance(int i) + { + return i < StringValues.Length + ? StringValues[i] + : i.ToString(); + } + } + + public class IntFactory + : ModelFactoryBase + { + public override void AssertIsEqual(int actual, int expected) + { + Assert.That(actual, Is.EqualTo(expected)); + } + + public override int CreateInstance(int i) + { + return i; + } + } + + public class DateTimeFactory + : ModelFactoryBase + { + public override void AssertIsEqual(DateTime actual, DateTime expected) + { + Assert.That(actual, Is.EqualTo(expected)); + } + + public override DateTime CreateInstance(int i) + { + return new DateTime(i, DateTimeKind.Utc); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/IModelFactory.cs b/tests/ServiceStack.Redis.Tests/Shared/IModelFactory.cs new file mode 100644 index 00000000..dbfb4c01 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/IModelFactory.cs @@ -0,0 +1,16 @@ +using System.Collections.Generic; + +namespace ServiceStack.Common.Tests.Models +{ + public interface IModelFactory + { + void AssertListsAreEqual(List actualList, IList expectedList); + void AssertIsEqual(T actual, T expected); + + T ExistingValue { get; } + T NonExistingValue { get; } + List CreateList(); + List CreateList2(); + T CreateInstance(int i); + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelFactoryBase.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelFactoryBase.cs new file mode 100644 index 00000000..a442f96c --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelFactoryBase.cs @@ -0,0 +1,62 @@ +using System.Collections.Generic; +using NUnit.Framework; + +namespace ServiceStack.Common.Tests.Models +{ + public abstract class ModelFactoryBase + : IModelFactory + { + #region Implementation of IModelFactory + + public void AssertListsAreEqual(List actualList, IList expectedList) + { + Assert.That(actualList, Has.Count.EqualTo(expectedList.Count)); + var i = 0; + + actualList.ForEach(x => + AssertIsEqual(x, expectedList[i++])); + 
} + + public abstract T CreateInstance(int i); + + public abstract void AssertIsEqual(T actual, T expected); + + public T ExistingValue + { + get + { + return CreateInstance(4); + } + } + + public T NonExistingValue + { + get + { + return CreateInstance(5); + } + } + + public List CreateList() + { + return new List + { + CreateInstance(1), + CreateInstance(2), + CreateInstance(3), + CreateInstance(4), + }; + } + public List CreateList2() + { + return new List + { + CreateInstance(5), + CreateInstance(6), + CreateInstance(7), + }; + } + + #endregion + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithComplexTypes.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithComplexTypes.cs new file mode 100644 index 00000000..e06a3c07 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithComplexTypes.cs @@ -0,0 +1,78 @@ +using System; +using System.Collections.Generic; +using NUnit.Framework; + +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithComplexTypes + { + public ModelWithComplexTypes() + { + this.StringList = new List(); + this.IntList = new List(); + this.StringMap = new Dictionary(); + this.IntMap = new Dictionary(); + } + + public long Id { get; set; } + + public List StringList { get; set; } + + public List IntList { get; set; } + + public Dictionary StringMap { get; set; } + + public Dictionary IntMap { get; set; } + + public ModelWithComplexTypes Child { get; set; } + + public static ModelWithComplexTypes Create(int id) + { + var row = new ModelWithComplexTypes + { + Id = id, + StringList = { "val" + id + 1, "val" + id + 2, "val" + id + 3 }, + IntList = { id + 1, id + 2, id + 3 }, + StringMap = + { + {"key" + id + 1, "val" + id + 1}, + {"key" + id + 2, "val" + id + 2}, + {"key" + id + 3, "val" + id + 3}, + }, + IntMap = + { + {id + 1, id + 2}, + {id + 3, id + 4}, + {id + 5, id + 6}, + }, + Child = new ModelWithComplexTypes { Id = id * 2 }, + }; + + return row; + } + + public static ModelWithComplexTypes CreateConstant(int i) + { + return Create(i); + } + + public static void AssertIsEqual(ModelWithComplexTypes actual, ModelWithComplexTypes expected) + { + Assert.That(actual.Id, Is.EqualTo(expected.Id)); + Assert.That(actual.StringList, Is.EquivalentTo(expected.StringList)); + Assert.That(actual.IntList, Is.EquivalentTo(expected.IntList)); + Assert.That(actual.StringMap, Is.EquivalentTo(expected.StringMap)); + Assert.That(actual.IntMap, Is.EquivalentTo(expected.IntMap)); + + if (expected.Child == null) + { + Assert.That(actual.Child, Is.Null); + } + else + { + Assert.That(actual.Child, Is.Not.Null); + AssertIsEqual(actual.Child, expected.Child); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithComplexTypesFactory.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithComplexTypesFactory.cs new file mode 100644 index 00000000..0f607dd6 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithComplexTypesFactory.cs @@ -0,0 +1,20 @@ +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithComplexTypesFactory + : ModelFactoryBase + { + public static ModelWithComplexTypesFactory Instance + = new ModelWithComplexTypesFactory(); + + public override void AssertIsEqual( + ModelWithComplexTypes actual, ModelWithComplexTypes expected) + { + ModelWithComplexTypes.AssertIsEqual(actual, expected); + } + + public override ModelWithComplexTypes CreateInstance(int i) + { + return ModelWithComplexTypes.CreateConstant(i); + } + } +} \ No newline at end of 
file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithCompositeIndexFields.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithCompositeIndexFields.cs new file mode 100644 index 00000000..b46fe000 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithCompositeIndexFields.cs @@ -0,0 +1,22 @@ +using ServiceStack.DataAnnotations; + +namespace ServiceStack.Common.Tests.Models +{ + [CompositeIndex(true, "Composite1", "Composite2")] + public class ModelWithCompositeIndexFields + { + public string Id { get; set; } + + [Index] + public string Name { get; set; } + + public string AlbumId { get; set; } + + [Index(true)] + public string UniqueName { get; set; } + + public string Composite1 { get; set; } + + public string Composite2 { get; set; } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentAndNullableTypes.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentAndNullableTypes.cs new file mode 100644 index 00000000..b6b7646f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentAndNullableTypes.cs @@ -0,0 +1,128 @@ +using System; +using NUnit.Framework; +using ServiceStack.Common; +using ServiceStack.DataAnnotations; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithFieldsOfDifferentAndNullableTypes + { + private static readonly ILog Log = LogManager.GetLogger(typeof(ModelWithFieldsOfDifferentAndNullableTypes)); + + [AutoIncrement] + public int Id { get; set; } + public int? NId { get; set; } + + public long LongId { get; set; } + public long? NLongId { get; set; } + + public Guid Guid { get; set; } + public Guid? NGuid { get; set; } + + public bool Bool { get; set; } + public bool? NBool { get; set; } + + public DateTime DateTime { get; set; } + public DateTime? NDateTime { get; set; } + + public float Float { get; set; } + public float? NFloat { get; set; } + + public double Double { get; set; } + public double? NDouble { get; set; } + + public decimal Decimal { get; set; } + public decimal? NDecimal { get; set; } + + public TimeSpan TimeSpan { get; set; } + public TimeSpan? 
NTimeSpan { get; set; } + + public static ModelWithFieldsOfDifferentAndNullableTypes Create(int id) + { + var row = new ModelWithFieldsOfDifferentAndNullableTypes + { + Id = id, + Bool = id % 2 == 0, + DateTime = DateTime.Now.AddDays(id), + Float = 1.11f + id, + Double = 1.11d + id, + Guid = Guid.NewGuid(), + LongId = 999 + id, + Decimal = id + 0.5m, + TimeSpan = TimeSpan.FromSeconds(id), + }; + + return row; + } + + public static ModelWithFieldsOfDifferentAndNullableTypes CreateConstant(int id) + { + var row = new ModelWithFieldsOfDifferentAndNullableTypes + { + Id = id, + Bool = id % 2 == 0, + DateTime = new DateTime(1979, (id % 12) + 1, (id % 28) + 1), + Float = 1.11f + id, + Double = 1.11d + id, + Guid = new Guid(((id % 240) + 16).ToString("X") + "461D9D-47DB-4778-B3FA-458379AE9BDC"), + LongId = 999 + id, + Decimal = id + 0.5m, + TimeSpan = TimeSpan.FromSeconds(id), + }; + + return row; + } + + public static void AssertIsEqual(ModelWithFieldsOfDifferentAndNullableTypes actual, ModelWithFieldsOfDifferentAndNullableTypes expected) + { + Assert.That(actual.Id, Is.EqualTo(expected.Id)); + Assert.That(actual.Guid, Is.EqualTo(expected.Guid)); + Assert.That(actual.LongId, Is.EqualTo(expected.LongId)); + Assert.That(actual.Bool, Is.EqualTo(expected.Bool)); + Assert.That(actual.TimeSpan, Is.EqualTo(expected.TimeSpan)); + + try + { + Assert.That(actual.DateTime, Is.EqualTo(expected.DateTime)); + } + catch (Exception ex) + { + Log.Error("Trouble with DateTime precisions, trying Assert again with rounding to seconds", ex); + Assert.That(actual.DateTime.RoundToSecond(), Is.EqualTo(expected.DateTime.RoundToSecond())); + } + + try + { + Assert.That(actual.Float, Is.EqualTo(expected.Float)); + } + catch (Exception ex) + { + Log.Error("Trouble with float precisions, trying Assert again with rounding to 10 decimals", ex); + Assert.That(Math.Round(actual.Float, 10), Is.EqualTo(Math.Round(actual.Float, 10))); + } + + try + { + Assert.That(actual.Double, Is.EqualTo(expected.Double)); + } + catch (Exception ex) + { + Log.Error("Trouble with double precisions, trying Assert again with rounding to 10 decimals", ex); + Assert.That(Math.Round(actual.Double, 10), Is.EqualTo(Math.Round(actual.Double, 10))); + } + + Assert.That(actual.NBool, Is.EqualTo(expected.NBool)); + Assert.That(actual.NDateTime, Is.EqualTo(expected.NDateTime)); + Assert.That(actual.NDecimal, Is.EqualTo(expected.NDecimal)); + Assert.That(actual.NDouble, Is.EqualTo(expected.NDouble)); + Assert.That(actual.NFloat, Is.EqualTo(expected.NFloat)); + Assert.That(actual.NGuid, Is.EqualTo(expected.NGuid)); + Assert.That(actual.NId, Is.EqualTo(expected.NId)); + Assert.That(actual.NLongId, Is.EqualTo(expected.NLongId)); + Assert.That(actual.NTimeSpan, Is.EqualTo(expected.NTimeSpan)); + + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentAndNullableTypesFactory.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentAndNullableTypesFactory.cs new file mode 100644 index 00000000..bed7fcde --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentAndNullableTypesFactory.cs @@ -0,0 +1,20 @@ +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithFieldsOfDifferentAndNullableTypesFactory + : ModelFactoryBase + { + public static ModelWithFieldsOfDifferentAndNullableTypesFactory Instance + = new ModelWithFieldsOfDifferentAndNullableTypesFactory(); + + public override void AssertIsEqual( + ModelWithFieldsOfDifferentAndNullableTypes actual, 
ModelWithFieldsOfDifferentAndNullableTypes expected) + { + ModelWithFieldsOfDifferentAndNullableTypes.AssertIsEqual(actual, expected); + } + + public override ModelWithFieldsOfDifferentAndNullableTypes CreateInstance(int i) + { + return ModelWithFieldsOfDifferentAndNullableTypes.CreateConstant(i); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypes.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypes.cs new file mode 100644 index 00000000..74f4992d --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypes.cs @@ -0,0 +1,181 @@ +using System; +using NUnit.Framework; +using ServiceStack.Common; +using ServiceStack.DataAnnotations; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithFieldsOfDifferentTypesAsNullables + { + private static readonly ILog Log = LogManager.GetLogger(typeof(ModelWithFieldsOfDifferentTypesAsNullables)); + + public int? Id { get; set; } + + public string Name { get; set; } + + public long? LongId { get; set; } + + public Guid? Guid { get; set; } + + public bool? Bool { get; set; } + + public DateTime? DateTime { get; set; } + + public double? Double { get; set; } + + public static ModelWithFieldsOfDifferentTypesAsNullables Create(int id) + { + var row = new ModelWithFieldsOfDifferentTypesAsNullables + { + Id = id, + Bool = id % 2 == 0, + DateTime = System.DateTime.Now.AddDays(id), + Double = 1.11d + id, + Guid = System.Guid.NewGuid(), + LongId = 999 + id, + Name = "Name" + id + }; + + return row; + } + + public static ModelWithFieldsOfDifferentTypesAsNullables CreateConstant(int id) + { + var row = new ModelWithFieldsOfDifferentTypesAsNullables + { + Id = id, + Bool = id % 2 == 0, + DateTime = new DateTime(1979, (id % 12) + 1, (id % 28) + 1), + Double = 1.11d + id, + Guid = new Guid(((id % 240) + 16).ToString("X") + "726E3B-9983-40B4-A8CB-2F8ADA8C8760"), + LongId = 999 + id, + Name = "Name" + id + }; + + return row; + } + + public static void AssertIsEqual(ModelWithFieldsOfDifferentTypes actual, ModelWithFieldsOfDifferentTypesAsNullables expected) + { + Assert.That(actual.Id, Is.EqualTo(expected.Id.Value)); + Assert.That(actual.Name, Is.EqualTo(expected.Name)); + Assert.That(actual.Guid, Is.EqualTo(expected.Guid.Value)); + Assert.That(actual.LongId, Is.EqualTo(expected.LongId.Value)); + Assert.That(actual.Bool, Is.EqualTo(expected.Bool.Value)); + try + { + Assert.That(actual.DateTime, Is.EqualTo(expected.DateTime.Value)); + } + catch (Exception ex) + { + Log.Error("Trouble with DateTime precisions, trying Assert again with rounding to seconds", ex); + Assert.That(actual.DateTime.RoundToSecond(), Is.EqualTo(expected.DateTime.Value.RoundToSecond())); + } + try + { + Assert.That(actual.Double, Is.EqualTo(expected.Double.Value)); + } + catch (Exception ex) + { + Log.Error("Trouble with double precisions, trying Assert again with rounding to 10 decimals", ex); + Assert.That(Math.Round(actual.Double, 10), Is.EqualTo(Math.Round(actual.Double, 10))); + } + } + } + + + public class ModelWithFieldsOfDifferentTypes + { + private static readonly ILog Log = LogManager.GetLogger(typeof(ModelWithFieldsOfDifferentTypes)); + + [AutoIncrement] + public int Id { get; set; } + + public string Name { get; set; } + + public long LongId { get; set; } + + public Guid Guid { get; set; } + + public bool Bool { get; set; } + + public DateTime DateTime { get; set; } + + public double Double { get; 
set; } + + public static ModelWithFieldsOfDifferentTypes Create(int id) + { + var row = new ModelWithFieldsOfDifferentTypes + { + Id = id, + Bool = id % 2 == 0, + DateTime = DateTime.Now.AddDays(id), + Double = 1.11d + id, + Guid = Guid.NewGuid(), + LongId = 999 + id, + Name = "Name" + id + }; + + return row; + } + + public static ModelWithFieldsOfDifferentTypes CreateConstant(int id) + { + var row = new ModelWithFieldsOfDifferentTypes + { + Id = id, + Bool = id % 2 == 0, + DateTime = new DateTime(1979, (id % 12) + 1, (id % 28) + 1), + Double = 1.11d + id, + Guid = new Guid(((id % 240) + 16).ToString("X") + "726E3B-9983-40B4-A8CB-2F8ADA8C8760"), + LongId = 999 + id, + Name = "Name" + id + }; + + return row; + } + + public override bool Equals(object obj) + { + var other = obj as ModelWithFieldsOfDifferentTypes; + if (other == null) return false; + + try + { + AssertIsEqual(this, other); + return true; + } + catch (Exception) + { + return false; + } + } + + public override int GetHashCode() + { + return (Id + Guid.ToString()).GetHashCode(); + } + + public static void AssertIsEqual(ModelWithFieldsOfDifferentTypes actual, ModelWithFieldsOfDifferentTypes expected) + { + if (actual.Id != expected.Id) + throw new Exception($"{actual.Id} != {expected.Id}"); + if (actual.Name != expected.Name) + throw new Exception($"{actual.Name} != {expected.Name}"); + if (actual.Guid != expected.Guid) + throw new Exception($"{actual.Guid} != {expected.Guid}"); + if (actual.LongId != expected.LongId) + throw new Exception($"{actual.LongId} != {expected.LongId}"); + if (actual.Bool != expected.Bool) + throw new Exception($"{actual.Bool} != {expected.Bool}"); + + if (actual.DateTime.RoundToSecond() != expected.DateTime.RoundToSecond()) + throw new Exception($"{actual.DateTime.RoundToSecond()} != {expected.DateTime.RoundToSecond()}"); + + if (Math.Abs(actual.Double - expected.Double) > 1) + throw new Exception($"{actual.Double} != {expected.Double}"); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypesFactory.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypesFactory.cs new file mode 100644 index 00000000..4fe5ea36 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypesFactory.cs @@ -0,0 +1,22 @@ +using System; + +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithFieldsOfDifferentTypesFactory + : ModelFactoryBase + { + public static ModelWithFieldsOfDifferentTypesFactory Instance + = new ModelWithFieldsOfDifferentTypesFactory(); + + public override void AssertIsEqual( + ModelWithFieldsOfDifferentTypes actual, ModelWithFieldsOfDifferentTypes expected) + { + ModelWithFieldsOfDifferentTypes.AssertIsEqual(actual, expected); + } + + public override ModelWithFieldsOfDifferentTypes CreateInstance(int i) + { + return ModelWithFieldsOfDifferentTypes.CreateConstant(i); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfNullableTypes.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfNullableTypes.cs new file mode 100644 index 00000000..24a2acf4 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfNullableTypes.cs @@ -0,0 +1,112 @@ +using System; +using NUnit.Framework; +using ServiceStack.Model; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithFieldsOfNullableTypes + : IHasIntId + { + private static readonly ILog Log 
= LogManager.GetLogger(typeof(ModelWithFieldsOfNullableTypes)); + + public int Id { get; set; } + public int? NId { get; set; } + + public long? NLongId { get; set; } + + public Guid? NGuid { get; set; } + + public bool? NBool { get; set; } + + public DateTime? NDateTime { get; set; } + + public float? NFloat { get; set; } + + public double? NDouble { get; set; } + + public decimal? NDecimal { get; set; } + + public TimeSpan? NTimeSpan { get; set; } + + public static ModelWithFieldsOfNullableTypes Create(int id) + { + var row = new ModelWithFieldsOfNullableTypes + { + Id = id, + NId = id, + NBool = id % 2 == 0, + NDateTime = DateTime.Now.AddDays(id), + NFloat = 1.11f + id, + NDouble = 1.11d + id, + NGuid = Guid.NewGuid(), + NLongId = 999 + id, + NDecimal = id + 0.5m, + NTimeSpan = TimeSpan.FromSeconds(id), + }; + + return row; + } + + public static ModelWithFieldsOfNullableTypes CreateConstant(int id) + { + var row = new ModelWithFieldsOfNullableTypes + { + Id = id, + NId = id, + NBool = id % 2 == 0, + NDateTime = new DateTime(1979, (id % 12) + 1, (id % 28) + 1), + NFloat = 1.11f + id, + NDouble = 1.11d + id, + NGuid = new Guid(((id % 240) + 16).ToString("X") + "7DA519-73B6-4525-84BA-B57673B2360D"), + NLongId = 999 + id, + NDecimal = id + 0.5m, + NTimeSpan = TimeSpan.FromSeconds(id), + }; + + return row; + } + + public static void AssertIsEqual(ModelWithFieldsOfNullableTypes actual, ModelWithFieldsOfNullableTypes expected) + { + Assert.That(actual.Id, Is.EqualTo(expected.Id)); + Assert.That(actual.NId, Is.EqualTo(expected.NId)); + Assert.That(actual.NGuid, Is.EqualTo(expected.NGuid)); + Assert.That(actual.NLongId, Is.EqualTo(expected.NLongId)); + Assert.That(actual.NBool, Is.EqualTo(expected.NBool)); + Assert.That(actual.NTimeSpan, Is.EqualTo(expected.NTimeSpan)); + + try + { + Assert.That(actual.NDateTime, Is.EqualTo(expected.NDateTime)); + } + catch (Exception ex) + { + Log.Error("Trouble with DateTime precisions, trying Assert again with rounding to seconds", ex); + Assert.That(actual.NDateTime.Value.ToUniversalTime().RoundToSecond(), Is.EqualTo(expected.NDateTime.Value.ToUniversalTime().RoundToSecond())); + } + + try + { + Assert.That(actual.NFloat, Is.EqualTo(expected.NFloat)); + } + catch (Exception ex) + { + Log.Error("Trouble with float precisions, trying Assert again with rounding to 10 decimals", ex); + Assert.That(Math.Round(actual.NFloat.Value, 10), Is.EqualTo(Math.Round(actual.NFloat.Value, 10))); + } + + try + { + Assert.That(actual.NDouble, Is.EqualTo(expected.NDouble)); + } + catch (Exception ex) + { + Log.Error("Trouble with double precisions, trying Assert again with rounding to 10 decimals", ex); + Assert.That(Math.Round(actual.NDouble.Value, 10), Is.EqualTo(Math.Round(actual.NDouble.Value, 10))); + } + + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfNullableTypesFactory.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfNullableTypesFactory.cs new file mode 100644 index 00000000..1c04743c --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfNullableTypesFactory.cs @@ -0,0 +1,20 @@ +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithFieldsOfNullableTypesFactory + : ModelFactoryBase + { + public static ModelWithFieldsOfNullableTypesFactory Instance + = new ModelWithFieldsOfNullableTypesFactory(); + + public override void AssertIsEqual( + ModelWithFieldsOfNullableTypes actual, ModelWithFieldsOfNullableTypes expected) + { + 
ModelWithFieldsOfNullableTypes.AssertIsEqual(actual, expected); + } + + public override ModelWithFieldsOfNullableTypes CreateInstance(int i) + { + return ModelWithFieldsOfNullableTypes.CreateConstant(i); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithIdAndName.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithIdAndName.cs new file mode 100644 index 00000000..ec04f3a4 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithIdAndName.cs @@ -0,0 +1,64 @@ +using System; +using NUnit.Framework; +using ServiceStack.DataAnnotations; + +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithIdAndName + { + public ModelWithIdAndName() + { + } + + public ModelWithIdAndName(int id) + { + Id = id; + Name = "Name" + id; + } + + [AutoIncrement] + public int Id { get; set; } + + public string Name { get; set; } + + public static ModelWithIdAndName Create(int id) + { + return new ModelWithIdAndName(id); + } + + public static void AssertIsEqual(ModelWithIdAndName actual, ModelWithIdAndName expected) + { + if (actual == null || expected == null) + { + Assert.That(actual == expected, Is.True); + return; + } + + Assert.That(actual.Id, Is.EqualTo(expected.Id)); + Assert.That(actual.Name, Is.EqualTo(expected.Name)); + } + + public bool Equals(ModelWithIdAndName other) + { + if (ReferenceEquals(null, other)) return false; + if (ReferenceEquals(this, other)) return true; + return other.Id == Id && Equals(other.Name, Name); + } + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != typeof(ModelWithIdAndName)) return false; + return Equals((ModelWithIdAndName)obj); + } + + public override int GetHashCode() + { + unchecked + { + return (Id * 397) ^ (Name != null ? 
Name.GetHashCode() : 0); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithIdOnly.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithIdOnly.cs new file mode 100644 index 00000000..2c531b6a --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithIdOnly.cs @@ -0,0 +1,18 @@ +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithIdOnly + { + public ModelWithIdOnly() + { + } + + public ModelWithIdOnly(long id) + { + Id = id; + } + + // must be long as you cannot have a table with only an autoincrement field + public long Id { get; set; } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithIndexFields.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithIndexFields.cs new file mode 100644 index 00000000..23229083 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithIndexFields.cs @@ -0,0 +1,17 @@ +using ServiceStack.DataAnnotations; + +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithIndexFields + { + public string Id { get; set; } + + [Index] + public string Name { get; set; } + + public string AlbumId { get; set; } + + [Index(true)] + public string UniqueName { get; set; } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithLongIdAndStringFields.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithLongIdAndStringFields.cs new file mode 100644 index 00000000..12a696ba --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithLongIdAndStringFields.cs @@ -0,0 +1,13 @@ +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithLongIdAndStringFields + { + public long Id { get; set; } + + public string Name { get; set; } + + public string AlbumId { get; set; } + + public string AlbumName { get; set; } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithMapAndList.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithMapAndList.cs new file mode 100644 index 00000000..60c911e6 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithMapAndList.cs @@ -0,0 +1,42 @@ +using System.Collections.Generic; +using NUnit.Framework; + +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithMapAndList + { + public ModelWithMapAndList() + { + this.Map = new Dictionary(); + this.List = new List(); + } + + public ModelWithMapAndList(int id) + : this() + { + Id = id; + Name = "Name" + id; + } + + public int Id { get; set; } + + public string Name { get; set; } + + public Dictionary Map { get; set; } + + public List List { get; set; } + + public static ModelWithMapAndList Create(int id) + { + return new ModelWithMapAndList(id); + } + + public static void AssertIsEqual(ModelWithMapAndList actual, ModelWithMapAndList expected) + { + Assert.That(actual.Id, Is.EqualTo(expected.Id)); + Assert.That(actual.Name, Is.EqualTo(expected.Name)); + Assert.That(actual.Map, Is.EquivalentTo(expected.Map)); + Assert.That(actual.List, Is.EquivalentTo(expected.List)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithNamedCompositeIndex.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithNamedCompositeIndex.cs new file mode 100644 index 00000000..28670876 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithNamedCompositeIndex.cs @@ -0,0 +1,22 @@ +using ServiceStack.DataAnnotations; + +namespace ServiceStack.Common.Tests.Models +{ + [CompositeIndex(true, "Composite1", "Composite2", Name = 
"custom_index_name")] + public class ModelWithNamedCompositeIndex + { + public string Id { get; set; } + + [Index] + public string Name { get; set; } + + public string AlbumId { get; set; } + + [Index(true)] + public string UniqueName { get; set; } + + public string Composite1 { get; set; } + + public string Composite2 { get; set; } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithOnlyStringFields.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithOnlyStringFields.cs new file mode 100644 index 00000000..c8522540 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithOnlyStringFields.cs @@ -0,0 +1,24 @@ +namespace ServiceStack.Common.Tests.Models +{ + public class ModelWithOnlyStringFields + { + public string Id { get; set; } + + public string Name { get; set; } + + public string AlbumId { get; set; } + + public string AlbumName { get; set; } + + public static ModelWithOnlyStringFields Create(string id) + { + return new ModelWithOnlyStringFields + { + Id = id, + Name = "Name", + AlbumId = "AlbumId", + AlbumName = "AlbumName", + }; + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/Movie.cs b/tests/ServiceStack.Redis.Tests/Shared/Movie.cs new file mode 100644 index 00000000..afcac8b7 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/Movie.cs @@ -0,0 +1,67 @@ +using System; +using System.Collections.Generic; +using System.Runtime.Serialization; +using ServiceStack.Common; + +namespace ServiceStack.Common.Tests.Models +{ + [DataContract] + public class Movie + { + public Movie() + { + this.Genres = new List(); + } + + [DataMember] + public string Id { get; set; } + + [DataMember] + public string Title { get; set; } + + [DataMember] + public decimal Rating { get; set; } + + [DataMember] + public string Director { get; set; } + + [DataMember] + public DateTime ReleaseDate { get; set; } + + [DataMember] + public string TagLine { get; set; } + + [DataMember] + public List Genres { get; set; } + + public bool Equals(Movie other) + { + if (ReferenceEquals(null, other)) return false; + if (ReferenceEquals(this, other)) return true; + return Equals(other.Id, Id) && Equals(other.Title, Title) && other.Rating == Rating && Equals(other.Director, Director) && other.ReleaseDate.Equals(ReleaseDate) && Equals(other.TagLine, TagLine) && Genres.EquivalentTo(other.Genres); + } + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != typeof(Movie)) return false; + return Equals((Movie)obj); + } + + public override int GetHashCode() + { + unchecked + { + int result = (Id != null ? Id.GetHashCode() : 0); + result = (result * 397) ^ (Title != null ? Title.GetHashCode() : 0); + result = (result * 397) ^ Rating.GetHashCode(); + result = (result * 397) ^ (Director != null ? Director.GetHashCode() : 0); + result = (result * 397) ^ ReleaseDate.GetHashCode(); + result = (result * 397) ^ (TagLine != null ? TagLine.GetHashCode() : 0); + result = (result * 397) ^ (Genres != null ? 
Genres.GetHashCode() : 0); + return result; + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/PerfTestBase.cs b/tests/ServiceStack.Redis.Tests/Shared/PerfTestBase.cs new file mode 100644 index 00000000..b8722b53 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/PerfTestBase.cs @@ -0,0 +1,110 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; + +namespace ServiceStack.Common.Tests +{ + public class PerfTestBase + { + protected int DefaultIterations { get; set; } + protected List MultipleIterations { get; set; } + + public PerfTestBase() + { + this.DefaultIterations = 10000; + this.MultipleIterations = new List { 1000, 10000, 100000, 1000000 }; + } + + protected StringBuilder SbLog = new StringBuilder(); + + public void Log(string message, params object[] args) + { + Console.WriteLine(message, args); + + SbLog.AppendFormat(message, args); + SbLog.AppendLine(); + } + + protected void CompareMultipleRuns(string run1Name, Action run1Action, string run2Name, Action run2Action) + { + WarmUp(run1Action, run2Action); + foreach (var iteration in this.MultipleIterations) + { + Log("\n{0} times:", iteration); + CompareRuns(iteration, run1Name, run1Action, run2Name, run2Action); + } + } + + protected void CompareRuns(string run1Name, Action run1Action, string run2Name, Action run2Action) + { + CompareRuns(DefaultIterations, run1Name, run1Action, run2Name, run2Action); + } + + protected void CompareRuns(int iterations, string run1Name, Action run1Action, string run2Name, Action run2Action) + { + var run1 = RunAction(run1Action, iterations, run1Name); + var run2 = RunAction(run2Action, iterations, run2Name); + + var runDiff = run1.Ticks - run2.Ticks; + var run1IsSlower = runDiff > 0; + var slowerRun = run1IsSlower ? run1Name : run2Name; + var fasterRun = run1IsSlower ? run2Name : run1Name; + var runDiffTime = run1IsSlower ? runDiff : runDiff * -1; + var runDiffAvg = run1IsSlower ? run1.Ticks / (double)run2.Ticks : run2.Ticks / (double)run1.Ticks; + + Log("{0} was {1}ms or {2} times slower than {3}", + slowerRun, runDiffTime, Math.Round(runDiffAvg, 2), fasterRun); + } + + protected void WarmUp(params Action[] actions) + { + foreach (var action in actions) + { + action(); + GC.Collect(); + } + } + + protected void RunMultipleTimes(Action action, string actionName) + { + WarmUp(action); + foreach (var iteration in this.MultipleIterations) + { + Log("\n{0} times:", iteration); + RunAction(action, iteration, actionName ?? "Action"); + } + } + + protected TimeSpan RunAction(Action action, int iterations) + { + return RunAction(action, iterations, null); + } + + protected TimeSpan RunAction(Action action, int iterations, string actionName) + { + actionName = actionName ?? 
action.GetType().Name; + var ticksTaken = Measure(action, iterations); + var timeSpan = TimeSpan.FromSeconds(ticksTaken * 1d / Stopwatch.Frequency); + + Log("{0} took {1}ms ({2} ticks), avg: {3} ticks", actionName, timeSpan.TotalMilliseconds, timeSpan.Ticks, (timeSpan.Ticks / iterations)); + + return timeSpan; + } + + protected long Measure(Action action, decimal iterations) + { + GC.Collect(); + var begin = Stopwatch.GetTimestamp(); + + for (var i = 0; i < iterations; i++) + { + action(); + } + + var end = Stopwatch.GetTimestamp(); + + return (end - begin); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/Poco.cs b/tests/ServiceStack.Redis.Tests/Shared/Poco.cs new file mode 100644 index 00000000..daa36f98 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/Poco.cs @@ -0,0 +1,7 @@ +namespace ServiceStack.Common.Tests.Models +{ + public class Poco + { + public string Name { get; set; } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/SampleOrderLine.cs b/tests/ServiceStack.Redis.Tests/Shared/SampleOrderLine.cs new file mode 100644 index 00000000..d25fefff --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/SampleOrderLine.cs @@ -0,0 +1,153 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using ServiceStack.Model; + +namespace ServiceStack.Common.Tests.Models +{ + public class SampleOrderLine + : IHasStringId + { + public string Id { get; set; } + + public string OrderUrn + { + get + { + return CreateUrn(this.UserId, this.OrderId, this.OrderLineId); + } + } + + public long OrderId { get; set; } + + public long OrderLineId { get; set; } + + public DateTime CreatedDate { get; set; } + + public Guid UserId { get; set; } + + public string UserName { get; set; } + + public Guid ProductId { get; set; } + + public string MflowUrn { get; set; } + + public string ProductType { get; set; } + + public string Description { get; set; } + + public string UpcEan { get; set; } + + public string Isrc { get; set; } + + public Guid? 
RecommendationUserId { get; set; } + + public string RecommendationUserName { get; set; } + + public string SupplierKeyName { get; set; } + + public string CostTierKeyName { get; set; } + + public string PriceTierKeyName { get; set; } + + public decimal VatRate { get; set; } + + public int ProductPriceIncVat { get; set; } + + public int Quantity { get; set; } + + public decimal TransactionValueExVat { get; set; } + + public decimal TransactionValueIncVat { get; set; } + + public decimal RecommendationDiscountRate { get; set; } + + public decimal DistributionDiscountRate { get; set; } + + public decimal RecommendationDiscountAccruedExVat { get; set; } + + public decimal DistributionDiscountAccruedExVat { get; set; } + + public decimal PromoMix { get; set; } + + public decimal DiscountMix { get; set; } + + public decimal CashMix { get; set; } + + public decimal PromoMixValueExVat { get; set; } + + public decimal DiscountMixValueExVat { get; set; } + + public decimal CashMixValueIncVat { get; set; } + + public string ContentUrn + { + get { return this.MflowUrn; } + set { this.MflowUrn = value; } + } + + public string TrackUrn + { + get; + set; + } + + public string Title + { + get; + set; + } + + public string ArtistUrn + { + get; + set; + } + + public string ArtistName + { + get; + set; + } + + public string AlbumUrn + { + get; + set; + } + + public string AlbumName + { + get; + set; + } + + public static string CreateUrn(Guid userId, long orderId, long orderLineId) + { + return string.Format("urn:orderline:{0}/{1}/{2}", + userId.ToString("N"), orderId, orderLineId); + } + + public static SampleOrderLine Create(Guid userId) + { + return Create(userId, 1, 1); + } + + public static SampleOrderLine Create(Guid userId, int orderId, int orderLineId) + { + return new SampleOrderLine + { + Id = CreateUrn(userId, orderId, orderLineId), + CreatedDate = DateTime.Now, + OrderId = orderId, + OrderLineId = orderLineId, + AlbumName = "AlbumName", + CashMixValueIncVat = 0.79m / 1.15m, + TransactionValueExVat = 0.79m, + ContentUrn = "urn:content:" + Guid.NewGuid().ToString("N"), + }; + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/Shipper.cs b/tests/ServiceStack.Redis.Tests/Shared/Shipper.cs new file mode 100644 index 00000000..1af895c3 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/Shipper.cs @@ -0,0 +1,36 @@ +using System; +using ServiceStack.Model; + +namespace ServiceStack.Common.Tests.Models +{ + public class Shipper + : IHasIntId + { + public int Id { get; set; } + public string CompanyName { get; set; } + public ShipperType ShipperType { get; set; } + public DateTime DateCreated { get; set; } + public Guid UniqueRef { get; set; } + + public override bool Equals(object obj) + { + var other = obj as Shipper; + if (other == null) return false; + return this.Id == other.Id && this.UniqueRef == other.UniqueRef; + } + + public override int GetHashCode() + { + return string.Concat(Id, UniqueRef).GetHashCode(); + } + } + + public enum ShipperType + { + All = Planes | Trains | Automobiles, + Unknown = 0, + Planes = 1, + Trains = 2, + Automobiles = 4 + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ShipperFactory.cs b/tests/ServiceStack.Redis.Tests/Shared/ShipperFactory.cs new file mode 100644 index 00000000..713784e8 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/ShipperFactory.cs @@ -0,0 +1,31 @@ +using System; +using NUnit.Framework; + +namespace ServiceStack.Common.Tests.Models +{ + public class 
ShipperFactory + : ModelFactoryBase + { + public override Shipper CreateInstance(int i) + { + var hex = ((i % 240) + 16).ToString("X"); + return new Shipper + { + Id = i, + CompanyName = "Shipper" + i, + DateCreated = new DateTime(i + 1 % 3000, (i % 11) + 1, (i % 27) + 1, 0, 0, 0, DateTimeKind.Utc), + ShipperType = (ShipperType)(i % 3), + UniqueRef = new Guid(hex + "D148A5-E5F1-4E5A-8C60-52E5A80ACCC6"), + }; + } + + public override void AssertIsEqual(Shipper actual, Shipper expected) + { + Assert.That(actual.Id, Is.EqualTo(expected.Id)); + Assert.That(actual.CompanyName, Is.EqualTo(expected.CompanyName)); + Assert.That(actual.ShipperType, Is.EqualTo(expected.ShipperType)); + Assert.That(actual.DateCreated, Is.EqualTo(expected.DateCreated)); + Assert.That(actual.UniqueRef, Is.EqualTo(expected.UniqueRef)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/TaskQueue.cs b/tests/ServiceStack.Redis.Tests/Shared/TaskQueue.cs new file mode 100644 index 00000000..355e15ec --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Shared/TaskQueue.cs @@ -0,0 +1,76 @@ +using System; +using NUnit.Framework; +using ServiceStack.Common; +using ServiceStack.Logging; +using ServiceStack.Text; + +namespace ServiceStack.Common.Tests.Models +{ + public class TaskQueue + { + private static readonly ILog Log = LogManager.GetLogger(typeof(TaskQueue)); + + public const string TaskLoad = "Load"; + public const string TaskIndex = "Index"; + + public const string StatusPending = "Pending"; + public const string StatusStarted = "Started"; + public const string StatusCompleted = "Completed"; + public const string StatusFailed = "Failed"; + + public const int PriorityLow = 0; + public const int PriorityMedium = 1; + public const int PriorityHigh = 2; + + public int Id { get; set; } + + public Guid? 
UserId { get; set; } + + public string Task { get; set; } + + public string ContentUrn { get; set; } + + public string Status { get; set; } + + public DateTime CreatedDate { get; set; } + + public int Priority { get; set; } + + public int NoOfAttempts { get; set; } + + public string ErrorMessage { get; set; } + + public static TaskQueue Create(int id) + { + return new TaskQueue + { + ContentUrn = "urn:track:" + id, + CreatedDate = DateTime.Now, + Task = TaskLoad, + Status = StatusPending, + NoOfAttempts = 0, + }; + } + + public static void AssertIsEqual(TaskQueue actual, TaskQueue expected) + { + Assert.That(actual.Id, Is.EqualTo(expected.Id)); + Assert.That(actual.UserId, Is.EqualTo(expected.UserId)); + Assert.That(actual.ContentUrn, Is.EqualTo(expected.ContentUrn)); + Assert.That(actual.Status, Is.EqualTo(expected.Status)); + try + { + Assert.That(actual.CreatedDate, Is.EqualTo(expected.CreatedDate)); + } + catch (Exception ex) + { + Log.Error("Trouble with DateTime precisions, trying Assert again with rounding to seconds", ex); + Assert.That(actual.CreatedDate.RoundToSecond(), Is.EqualTo(expected.CreatedDate.RoundToSecond())); + } + Assert.That(actual.Priority, Is.EqualTo(expected.Priority)); + Assert.That(actual.NoOfAttempts, Is.EqualTo(expected.NoOfAttempts)); + Assert.That(actual.ErrorMessage, Is.EqualTo(expected.ErrorMessage)); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ShippersExample.Async.cs b/tests/ServiceStack.Redis.Tests/ShippersExample.Async.cs new file mode 100644 index 00000000..c0154116 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/ShippersExample.Async.cs @@ -0,0 +1,126 @@ +// +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 Service Stack LLC. All Rights Reserved. +// +// Licensed under the same terms of reddis and ServiceStack: new BSD license. 
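+// Async variant of ShippersExample.cs, exercising the IRedisTypedClientAsync API.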
+// + +using System; +using System.Diagnostics; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Async")] + public class ShippersExampleAsync + { + + public class Shipper + { + public long Id { get; set; } + public string CompanyName { get; set; } + public DateTime DateCreated { get; set; } + public ShipperType ShipperType { get; set; } + public Guid UniqueRef { get; set; } + } + + static void Dump(string message, T entity) + { + var text = TypeSerializer.SerializeToString(entity); + + //make it a little easier on the eyes + var prettyLines = text.Split(new[] { "[", "},{", "]" }, + StringSplitOptions.RemoveEmptyEntries) + .ToList().ConvertAll(x => x.Replace("{", "").Replace("}", "")); + + Debug.WriteLine("\n" + message); + foreach (var l in prettyLines) Debug.WriteLine(l); + } + + [Test] + public async Task Shippers_UseCase() + { + await using var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + //Create a 'strongly-typed' API that makes all Redis Value operations to apply against Shippers + IRedisTypedClientAsync redis = redisClient.As(); + + //Redis lists implement IList while Redis sets implement ICollection + var currentShippers = redis.Lists["urn:shippers:current"]; + var prospectiveShippers = redis.Lists["urn:shippers:prospective"]; + + await currentShippers.AddAsync( + new Shipper + { + Id = await redis.GetNextSequenceAsync(), + CompanyName = "Trains R Us", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.Trains, + UniqueRef = Guid.NewGuid() + }); + + await currentShippers.AddAsync( + new Shipper + { + Id = await redis.GetNextSequenceAsync(), + CompanyName = "Planes R Us", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.Planes, + UniqueRef = Guid.NewGuid() + }); + + var lameShipper = new Shipper + { + Id = await redis.GetNextSequenceAsync(), + CompanyName = "We do everything!", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.All, + UniqueRef = Guid.NewGuid() + }; + + await currentShippers.AddAsync(lameShipper); + + Dump("ADDED 3 SHIPPERS:", await currentShippers.ToListAsync()); + + await currentShippers.RemoveAsync(lameShipper); + + Dump("REMOVED 1:", await currentShippers.ToListAsync()); + + await prospectiveShippers.AddAsync( + new Shipper + { + Id = await redis.GetNextSequenceAsync(), + CompanyName = "Trucks R Us", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.Automobiles, + UniqueRef = Guid.NewGuid() + }); + + Dump("ADDED A PROSPECTIVE SHIPPER:", await prospectiveShippers.ToListAsync()); + + await redis.PopAndPushItemBetweenListsAsync(prospectiveShippers, currentShippers); + + Dump("CURRENT SHIPPERS AFTER POP n' PUSH:", await currentShippers.ToListAsync()); + Dump("PROSPECTIVE SHIPPERS AFTER POP n' PUSH:", await prospectiveShippers.ToListAsync()); + + var poppedShipper = await redis.PopItemFromListAsync(currentShippers); + Dump("POPPED a SHIPPER:", poppedShipper); + Dump("CURRENT SHIPPERS AFTER POP:", await currentShippers.ToListAsync()); + + //reset sequence and delete all lists + await redis.SetSequenceAsync(0); + await redis.RemoveEntryAsync(new[] { currentShippers, prospectiveShippers }); + Dump("DELETING CURRENT AND PROSPECTIVE SHIPPERS:", await currentShippers.ToListAsync()); + + } + + } +} diff --git a/tests/ServiceStack.Redis.Tests/ShippersExample.cs b/tests/ServiceStack.Redis.Tests/ShippersExample.cs 
index d49247d9..79a23688 100644 --- a/tests/ServiceStack.Redis.Tests/ShippersExample.cs +++ b/tests/ServiceStack.Redis.Tests/ShippersExample.cs @@ -4,7 +4,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2010 Liquidbit Ltd. +// Copyright 2013 Service Stack LLC. All Rights Reserved. // // Licensed under the same terms of reddis and ServiceStack: new BSD license. // @@ -19,105 +19,109 @@ namespace ServiceStack.Redis.Tests { - [TestFixture] - public class ShippersExample - { - - public class Shipper - { - public long Id { get; set; } - public string CompanyName { get; set; } - public DateTime DateCreated { get; set; } - public ShipperType ShipperType { get; set; } - public Guid UniqueRef { get; set; } - } - - static void Dump(string message, T entity) - { - var text = TypeSerializer.SerializeToString(entity); - - //make it a little easier on the eyes - var prettyLines = text.Split(new[] { "[", "},{", "]" }, - StringSplitOptions.RemoveEmptyEntries) - .ToList().ConvertAll(x => x.Replace("{", "").Replace("}", "")); - - Debug.WriteLine("\n" + message); - foreach(var l in prettyLines) Debug.WriteLine(l); - } - - [Test] - public void Shippers_UseCase() - { - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - //Create a 'strongly-typed' API that makes all Redis Value operations to apply against Shippers - IRedisTypedClient redis = redisClient.GetTypedClient(); - - //Redis lists implement IList while Redis sets implement ICollection - var currentShippers = redis.Lists["urn:shippers:current"]; - var prospectiveShippers = redis.Lists["urn:shippers:prospective"]; - - currentShippers.Add( - new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "Trains R Us", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.Trains, - UniqueRef = Guid.NewGuid() - }); - - currentShippers.Add( - new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "Planes R Us", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.Planes, - UniqueRef = Guid.NewGuid() - }); - - var lameShipper = new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "We do everything!", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.All, - UniqueRef = Guid.NewGuid() - }; - - currentShippers.Add(lameShipper); - - Dump("ADDED 3 SHIPPERS:", currentShippers); - - currentShippers.Remove(lameShipper); - - Dump("REMOVED 1:", currentShippers); - - prospectiveShippers.Add( - new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "Trucks R Us", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.Automobiles, - UniqueRef = Guid.NewGuid() - }); - - Dump("ADDED A PROSPECTIVE SHIPPER:", prospectiveShippers); - - redis.PopAndPushItemBetweenLists(prospectiveShippers, currentShippers); - - Dump("CURRENT SHIPPERS AFTER POP n' PUSH:", currentShippers); - Dump("PROSPECTIVE SHIPPERS AFTER POP n' PUSH:", prospectiveShippers); - - var poppedShipper = redis.PopItemFromList(currentShippers); - Dump("POPPED a SHIPPER:", poppedShipper); - Dump("CURRENT SHIPPERS AFTER POP:", currentShippers); - - //reset sequence and delete all lists - redis.SetSequence(0); - redis.RemoveEntry(currentShippers, prospectiveShippers); - Dump("DELETING CURRENT AND PROSPECTIVE SHIPPERS:", currentShippers); - } - - } - - } -} \ No newline at end of file + [TestFixture] + public class ShippersExample + { + + public class Shipper + { + public long Id { get; set; } + public string CompanyName { get; set; } + public DateTime DateCreated { get; set; } + public ShipperType ShipperType { get; set; 
} + public Guid UniqueRef { get; set; } + } + + static void Dump(string message, T entity) + { + var text = TypeSerializer.SerializeToString(entity); + + //make it a little easier on the eyes + var prettyLines = text.Split(new[] { "[", "},{", "]" }, + StringSplitOptions.RemoveEmptyEntries) + .ToList().ConvertAll(x => x.Replace("{", "").Replace("}", "")); + + Debug.WriteLine("\n" + message); + foreach (var l in prettyLines) Debug.WriteLine(l); + } + + [Test] + public void Shippers_UseCase() + { + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + //Create a 'strongly-typed' API that makes all Redis Value operations to apply against Shippers + IRedisTypedClient redis = redisClient.As(); + + //Redis lists implement IList while Redis sets implement ICollection + var currentShippers = redis.Lists["urn:shippers:current"]; + var prospectiveShippers = redis.Lists["urn:shippers:prospective"]; + + currentShippers.Add( + new Shipper + { + Id = redis.GetNextSequence(), + CompanyName = "Trains R Us", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.Trains, + UniqueRef = Guid.NewGuid() + }); + + currentShippers.Add( + new Shipper + { + Id = redis.GetNextSequence(), + CompanyName = "Planes R Us", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.Planes, + UniqueRef = Guid.NewGuid() + }); + + var lameShipper = new Shipper + { + Id = redis.GetNextSequence(), + CompanyName = "We do everything!", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.All, + UniqueRef = Guid.NewGuid() + }; + + currentShippers.Add(lameShipper); + + Dump("ADDED 3 SHIPPERS:", currentShippers); + + currentShippers.Remove(lameShipper); + + Dump("REMOVED 1:", currentShippers); + + prospectiveShippers.Add( + new Shipper + { + Id = redis.GetNextSequence(), + CompanyName = "Trucks R Us", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.Automobiles, + UniqueRef = Guid.NewGuid() + }); + + Dump("ADDED A PROSPECTIVE SHIPPER:", prospectiveShippers); + + redis.PopAndPushItemBetweenLists(prospectiveShippers, currentShippers); + + Dump("CURRENT SHIPPERS AFTER POP n' PUSH:", currentShippers); + Dump("PROSPECTIVE SHIPPERS AFTER POP n' PUSH:", prospectiveShippers); + + var poppedShipper = redis.PopItemFromList(currentShippers); + Dump("POPPED a SHIPPER:", poppedShipper); + Dump("CURRENT SHIPPERS AFTER POP:", currentShippers); + + //reset sequence and delete all lists + redis.SetSequence(0); + redis.RemoveEntry(currentShippers, prospectiveShippers); + Dump("DELETING CURRENT AND PROSPECTIVE SHIPPERS:", currentShippers); + } + + } + + } +} diff --git a/tests/ServiceStack.Redis.Tests/SslTests.cs b/tests/ServiceStack.Redis.Tests/SslTests.cs new file mode 100644 index 00000000..ae082a06 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/SslTests.cs @@ -0,0 +1,350 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.IO; +using System.Linq; +using System.Net.Security; +using System.Net.Sockets; +using System.Text; +using System.Threading; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Configuration; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [Ignore("Requires ~/azureconfig.txt")] + [TestFixture, Category("Integration")] + public class SslTests + { + private string Host; + private int Port; + private string Password; + private string connectionString; + + [OneTimeSetUp] + public void OneTimeSetUp() + { + var settings = new TextFileSettings("~/azureconfig.txt".MapProjectPath()); + Host = 
settings.GetString("Host"); + Port = settings.Get("Port", 6379); + Password = settings.GetString("Password"); + connectionString = "{0}@{1}".Fmt(Password, Host); + } + + [Test] + public void Can_connect_to_azure_redis() + { + using (var client = new RedisClient(connectionString)) + { + client.Set("foo", "bar"); + var foo = client.GetValue("foo"); + foo.Print(); + } + } + + [Test] + public void Can_connect_to_ssl_azure_redis() + { + using (var client = new RedisClient(connectionString)) + { + client.Set("foo", "bar"); + var foo = client.GetValue("foo"); + foo.Print(); + } + } + + [Test] + public void Can_connect_to_ssl_azure_redis_with_UrlFormat() + { + var url = "redis://{0}?ssl=true&password={1}".Fmt(Host, Password.UrlEncode()); + using (var client = new RedisClient(url)) + { + client.Set("foo", "bar"); + var foo = client.GetValue("foo"); + foo.Print(); + } + } + + [Test] + public void Can_connect_to_ssl_azure_redis_with_UrlFormat_Custom_SSL_Protocol () + { + var url = "redis://{0}?ssl=true&sslprotocols=Tls12&password={1}".Fmt(Host, Password.UrlEncode()); + using (var client = new RedisClient(url)) + { + client.Set("foo", "bar"); + var foo = client.GetValue("foo"); + foo.Print(); + } + } + + [Test] + public void Can_connect_to_ssl_azure_redis_with_PooledClientsManager() + { + using (var redisManager = new PooledRedisClientManager(connectionString)) + using (var client1 = redisManager.GetClient()) + using (var client2 = redisManager.GetClient()) + { + client1.Set("foo", "bar"); + var foo = client2.GetValue("foo"); + foo.Print(); + } + } + + [Test] + public void Can_connect_to_NetworkStream() + { + var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp) + { + SendTimeout = -1, + ReceiveTimeout = -1, + }; + + socket.Connect(Host, 6379); + + if (!socket.Connected) + { + socket.Close(); + throw new Exception("Could not connect"); + } + + Stream networkStream = new NetworkStream(socket); + + SendAuth(networkStream); + } + + [Test] + public void Can_connect_to_Buffered_SslStream() + { + var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp) + { + SendTimeout = -1, + ReceiveTimeout = -1, + }; + + socket.Connect(Host, Port); + + if (!socket.Connected) + { + socket.Close(); + throw new Exception("Could not connect"); + } + + Stream networkStream = new NetworkStream(socket); + + SslStream sslStream; + + if (Env.IsMono) + { + //Mono doesn't support EncryptionPolicy + sslStream = new SslStream(networkStream, + leaveInnerStreamOpen: false, + userCertificateValidationCallback: RedisConfig.CertificateValidationCallback, + userCertificateSelectionCallback: RedisConfig.CertificateSelectionCallback); + } + else + { + var ctor = typeof(SslStream).GetConstructors() + .First(x => x.GetParameters().Length == 5); + + var policyType = AssemblyUtils.FindType("System.Net.Security.EncryptionPolicy"); + var policyValue = Enum.Parse(policyType, "RequireEncryption"); + + sslStream = (SslStream)ctor.Invoke(new[] { + networkStream, + false, + RedisConfig.CertificateValidationCallback, + RedisConfig.CertificateSelectionCallback, + policyValue, + }); + + //sslStream = new SslStream(networkStream, + // leaveInnerStreamOpen: false, + // userCertificateValidationCallback: null, + // userCertificateSelectionCallback: null, + // encryptionPolicy: EncryptionPolicy.RequireEncryption); + } + +#if NETCORE + sslStream.AuthenticateAsClientAsync(Host).Wait(); +#else + sslStream.AuthenticateAsClient(Host); +#endif + + if (!sslStream.IsEncrypted) + throw new 
Exception("Could not establish an encrypted connection to " + Host); + + var bstream = new System.IO.BufferedStream(sslStream, 16 * 1024); + + SendAuth(bstream); + } + + private readonly byte[] endData = new[] { (byte)'\r', (byte)'\n' }; + private void SendAuth(Stream stream) + { + WriteAllToStream(stream, "AUTH".ToUtf8Bytes(), Password.ToUtf8Bytes()); + ExpectSuccess(stream); + } + + public void WriteAllToStream(Stream stream, params byte[][] cmdWithBinaryArgs) + { + WriteToStream(stream, GetCmdBytes('*', cmdWithBinaryArgs.Length)); + + foreach (var safeBinaryValue in cmdWithBinaryArgs) + { + WriteToStream(stream, GetCmdBytes('$', safeBinaryValue.Length)); + WriteToStream(stream, safeBinaryValue); + WriteToStream(stream, endData); + } + + stream.Flush(); + } + + public void WriteToStream(Stream stream, byte[] bytes) + { + stream.Write(bytes, 0, bytes.Length); + } + + private static byte[] GetCmdBytes(char cmdPrefix, int noOfLines) + { + var strLines = noOfLines.ToString(); + var strLinesLength = strLines.Length; + + var cmdBytes = new byte[1 + strLinesLength + 2]; + cmdBytes[0] = (byte)cmdPrefix; + + for (var i = 0; i < strLinesLength; i++) + cmdBytes[i + 1] = (byte)strLines[i]; + + cmdBytes[1 + strLinesLength] = 0x0D; // \r + cmdBytes[2 + strLinesLength] = 0x0A; // \n + + return cmdBytes; + } + + protected void ExpectSuccess(Stream stream) + { + int c = stream.ReadByte(); + if (c == -1) + throw new RedisRetryableException("No more data"); + + var s = ReadLine(stream); + s.Print(); + + if (c == '-') + throw new Exception(s.StartsWith("ERR") && s.Length >= 4 ? s.Substring(4) : s); + } + + protected string ReadLine(Stream stream) + { + var sb = new StringBuilder(); + + int c; + while ((c = stream.ReadByte()) != -1) + { + if (c == '\r') + continue; + if (c == '\n') + break; + sb.Append((char)c); + } + return sb.ToString(); + } + + //[Conditional("DEBUG")] + protected static void Log(string fmt, params object[] args) + { + //Debug.WriteLine(String.Format(fmt, args)); + Console.WriteLine(fmt, args); + } + + [Test] + public void SSL_can_support_64_threads_using_the_client_sequentially() + { + var results = 100.Times(x => ModelWithFieldsOfDifferentTypes.Create(x)); + var testData = TypeSerializer.SerializeToString(results); + + var before = Stopwatch.GetTimestamp(); + + const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + + using (var redisClient = new RedisClient(connectionString)) + { + for (var i = 0; i < noOfConcurrentClients; i++) + { + var clientNo = i; + UseClient(redisClient, clientNo, testData); + } + } + + Debug.WriteLine(String.Format("Time Taken: {0}", (Stopwatch.GetTimestamp() - before) / 1000)); + } + + [Test] + public void SSL_can_support_64_threads_using_the_client_simultaneously() + { + var results = 100.Times(x => ModelWithFieldsOfDifferentTypes.Create(x)); + var testData = TypeSerializer.SerializeToString(results); + + var before = Stopwatch.GetTimestamp(); + + const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + + var clientAsyncResults = new List(); + using (var manager = new PooledRedisClientManager(TestConfig.MasterHosts, TestConfig.ReplicaHosts)) + { + manager.GetClient().Run(x => x.FlushAll()); + + for (var i = 0; i < noOfConcurrentClients; i++) + { + var clientNo = i; + var action = (Action)(() => UseClientAsync(manager, clientNo, testData)); + clientAsyncResults.Add(action.BeginInvoke(null, null)); + } + } + + WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); + + 
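// WaitAll blocks until every client started via BeginInvoke has completed before the elapsed time is reported +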
Debug.WriteLine(String.Format("Completed in {0} ticks", (Stopwatch.GetTimestamp() - before))); + } + + private static void UseClientAsync(IRedisClientsManager manager, int clientNo, string testData) + { + using (var client = manager.GetReadOnlyClient()) + { + UseClient(client, clientNo, testData); + } + } + + private static void UseClient(IRedisClient client, int clientNo, string testData) + { + var host = ""; + + try + { + host = client.Host; + + Log("Client '{0}' is using '{1}'", clientNo, client.Host); + + var testClientKey = "test:" + host + ":" + clientNo; + client.SetValue(testClientKey, testData); + var result = client.GetValue(testClientKey) ?? ""; + + Log("\t{0} => {1} len {2} {3} len", testClientKey, + testData.Length, testData.Length == result.Length ? "==" : "!=", result.Length); + } + catch (NullReferenceException ex) + { + Debug.WriteLine("NullReferenceException StackTrace: \n" + ex.StackTrace); + Assert.Fail(ex.Message); + } + catch (Exception ex) + { + Debug.WriteLine(String.Format("\t[ERROR@{0}]: {1} => {2}", + host, ex.GetType().Name, ex.Message)); + Assert.Fail(ex.Message); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Support/CustomType.cs b/tests/ServiceStack.Redis.Tests/Support/CustomType.cs index ff9af135..1620e8fd 100644 --- a/tests/ServiceStack.Redis.Tests/Support/CustomType.cs +++ b/tests/ServiceStack.Redis.Tests/Support/CustomType.cs @@ -1,28 +1,31 @@ +using ServiceStack.Text; + namespace ServiceStack.Redis.Tests.Support { - public class CustomType - { - public long CustomId { get; set; } - public string CustomName { get; set; } + [RuntimeSerializable] + public class CustomType + { + public long CustomId { get; set; } + public string CustomName { get; set; } - public bool Equals(CustomType other) - { - if (ReferenceEquals(null, other)) return false; - if (ReferenceEquals(this, other)) return true; - return other.CustomId == CustomId; - } + public bool Equals(CustomType other) + { + if (ReferenceEquals(null, other)) return false; + if (ReferenceEquals(this, other)) return true; + return other.CustomId == CustomId; + } - public override bool Equals(object obj) - { - if (ReferenceEquals(null, obj)) return false; - if (ReferenceEquals(this, obj)) return true; - if (obj.GetType() != typeof (CustomType)) return false; - return Equals((CustomType) obj); - } + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != typeof(CustomType)) return false; + return Equals((CustomType)obj); + } - public override int GetHashCode() - { - return CustomId.GetHashCode(); - } - } + public override int GetHashCode() + { + return CustomId.GetHashCode(); + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Support/CustomTypeFactory.cs b/tests/ServiceStack.Redis.Tests/Support/CustomTypeFactory.cs index f17351fe..69221c06 100644 --- a/tests/ServiceStack.Redis.Tests/Support/CustomTypeFactory.cs +++ b/tests/ServiceStack.Redis.Tests/Support/CustomTypeFactory.cs @@ -3,23 +3,23 @@ namespace ServiceStack.Redis.Tests.Support { - public class CustomTypeFactory : ModelFactoryBase - { - public CustomTypeFactory() - { - ModelConfig.Id(x => x.CustomId); - } + public class CustomTypeFactory : ModelFactoryBase + { + public CustomTypeFactory() + { + ModelConfig.Id(x => x.CustomId); + } - public override void AssertIsEqual(CustomType actual, CustomType expected) - { - Assert.AreEqual(actual.CustomId, expected.CustomId); - 
Assert.AreEqual(actual.CustomName, expected.CustomName); - } + public override void AssertIsEqual(CustomType actual, CustomType expected) + { + Assert.AreEqual(actual.CustomId, expected.CustomId); + Assert.AreEqual(actual.CustomName, expected.CustomName); + } - public override CustomType CreateInstance(int i) - { - return new CustomType { CustomId = i, CustomName = "Name" + i }; - } - } + public override CustomType CreateInstance(int i) + { + return new CustomType { CustomId = i, CustomName = "Name" + i }; + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Support/TcpClientWithTimeout.cs b/tests/ServiceStack.Redis.Tests/Support/TcpClientWithTimeout.cs index 9947c455..e9525934 100644 --- a/tests/ServiceStack.Redis.Tests/Support/TcpClientWithTimeout.cs +++ b/tests/ServiceStack.Redis.Tests/Support/TcpClientWithTimeout.cs @@ -1,98 +1,101 @@ +#if !NETCORE using System; using System.Net.Sockets; using System.Threading; namespace ServiceStack.Redis.Tests.Support { - internal class TcpClientWithTimeout - { - protected string hostname; - protected int port; - protected TimeSpan timeout; - protected TcpClient connection; - protected bool connected; - protected Exception exception; + internal class TcpClientWithTimeout + { + protected string hostname; + protected int port; + protected TimeSpan timeout; + protected TcpClient connection; + protected bool connected; + protected Exception exception; - public TcpClientWithTimeout(string hostname, int port, TimeSpan timeout) - { - this.hostname = hostname; - this.port = port; - this.timeout = timeout; - } - public TcpClient Connect() - { - // kick off the thread that tries to connect - connected = false; - exception = null; + public TcpClientWithTimeout(string hostname, int port, TimeSpan timeout) + { + this.hostname = hostname; + this.port = port; + this.timeout = timeout; + } + public TcpClient Connect() + { + // kick off the thread that tries to connect + connected = false; + exception = null; - var backgroundThread = new Thread(BeginConnect) { - IsBackground = true - }; - // wont prevent the process from terminating while it does the long timeout - backgroundThread.Start(); + var backgroundThread = new Thread(BeginConnect) + { + IsBackground = true + }; + // wont prevent the process from terminating while it does the long timeout + backgroundThread.Start(); - // wait for either the timeout or the thread to finish - backgroundThread.Join(timeout); + // wait for either the timeout or the thread to finish + backgroundThread.Join(timeout); - if (connected) - { - // it succeeded, so return the connection - backgroundThread.Abort(); - return connection; - } - if (exception != null) - { - // it crashed, so return the exception to the caller - backgroundThread.Abort(); - throw exception; - } - else - { - // if it gets here, it timed out, so abort the thread and throw an exception - backgroundThread.Abort(); - var message = string.Format("TcpClient connection to {0}:{1} timed out", - hostname, port); - throw new TimeoutException(message); - } - } - protected void BeginConnect() - { - try - { - connection = new TcpClient(hostname, port); - // record that it succeeded, for the main thread to return to the caller - connected = true; - } - catch (Exception ex) - { - // record the exception for the main thread to re-throw back to the calling code - exception = ex; - } - } - } + if (connected) + { + // it succeeded, so return the connection + backgroundThread.Abort(); + return connection; + } + if (exception != null) + { + // it 
crashed, so return the exception to the caller + backgroundThread.Abort(); + throw exception; + } + else + { + // if it gets here, it timed out, so abort the thread and throw an exception + backgroundThread.Abort(); + var message = string.Format("TcpClient connection to {0}:{1} timed out", + hostname, port); + throw new TimeoutException(message); + } + } + protected void BeginConnect() + { + try + { + connection = new TcpClient(hostname, port); + // record that it succeeded, for the main thread to return to the caller + connected = true; + } + catch (Exception ex) + { + // record the exception for the main thread to re-throw back to the calling code + exception = ex; + } + } + } - internal class TcpClientExample - { - void Main() - { - // connect with a 5 second timeout on the connection - var connection = new TcpClientWithTimeout( - "www.google.com", 80, TimeSpan.FromSeconds(5)).Connect(); + internal class TcpClientExample + { + void Main() + { + // connect with a 5 second timeout on the connection + var connection = new TcpClientWithTimeout( + "www.google.com", 80, TimeSpan.FromSeconds(5)).Connect(); - var stream = connection.GetStream(); + var stream = connection.GetStream(); - // Send 10 bytes - byte[] toSend = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xa }; - stream.Write(toSend, 0, toSend.Length); + // Send 10 bytes + byte[] toSend = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xa }; + stream.Write(toSend, 0, toSend.Length); - // ReceiveMessages 10 bytes - var readbuf = new byte[10]; // you must allocate space first - stream.ReadTimeout = 10000; // 10 second timeout on the read - stream.Read(readbuf, 0, 10); // read + // ReceiveMessages 10 bytes + var readbuf = new byte[10]; // you must allocate space first + stream.ReadTimeout = 10000; // 10 second timeout on the read + stream.Read(readbuf, 0, 10); // read - // Disconnect nicely - stream.Close(); // workaround for a .net bug: http://support.microsoft.com/kb/821625 - connection.Close(); - } - } -} \ No newline at end of file + // Disconnect nicely + stream.Close(); // workaround for a .net bug: http://support.microsoft.com/kb/821625 + connection.Close(); + } + } +} +#endif \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/TestConfig.cs b/tests/ServiceStack.Redis.Tests/TestConfig.cs index 446c5a6b..744091c8 100644 --- a/tests/ServiceStack.Redis.Tests/TestConfig.cs +++ b/tests/ServiceStack.Redis.Tests/TestConfig.cs @@ -1,26 +1,43 @@ -using ServiceStack.Common.Support; +using System; using ServiceStack.Logging; +using ServiceStack.Support; namespace ServiceStack.Redis.Tests { - public static class TestConfig - { - static TestConfig() - { - LogManager.LogFactory = new InMemoryLogFactory(); - } + public static class TestConfig + { + static TestConfig() + { + LogManager.LogFactory = new InMemoryLogFactory(); + } - public const bool IgnoreLongTests = true; + public static bool IgnoreLongTests = true; + + public static string SingleHost => Environment.GetEnvironmentVariable("CI_REDIS") ?? "localhost"; + + public static string GeoHost => Environment.GetEnvironmentVariable("CI_REDIS") ?? 
"10.0.0.121"; - public const string SingleHost = "localhost"; public static readonly string[] MasterHosts = new[] { "localhost" }; - public static readonly string[] SlaveHosts = new[] { "localhost" }; + public static readonly string[] ReplicaHosts = new[] { "localhost" }; + + public const int RedisPort = 6379; - public const int RedisPort = 6379; - public const int AlchemyPort = 6380; + public static string SingleHostConnectionString + { + get + { + return SingleHost + ":" + RedisPort; + } + } - //public const string SingleHost = "chi-dev-mem1.ddnglobal.local"; - //public static readonly string [] MasterHosts = new[] { "chi-dev-mem1.ddnglobal.local" }; - //public static readonly string [] SlaveHosts = new[] { "chi-dev-mem1.ddnglobal.local", "chi-dev-mem2.ddnglobal.local" }; - } + public static BasicRedisClientManager BasicClientManger + { + get + { + return new BasicRedisClientManager(new[] { + SingleHostConnectionString + }); + } + } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/TestData/PopulateTestData.cs b/tests/ServiceStack.Redis.Tests/TestData/PopulateTestData.cs index b1ced555..9ca30eb8 100644 --- a/tests/ServiceStack.Redis.Tests/TestData/PopulateTestData.cs +++ b/tests/ServiceStack.Redis.Tests/TestData/PopulateTestData.cs @@ -3,14 +3,14 @@ namespace ServiceStack.Redis.Tests.TestData { - /// - /// Simple class to populate redis with some test data - /// - [TestFixture, Category("Integration")] - public class PopulateTestData - : RedisClientTestsBase - { - const string StringId = "urn:populatetest:string"; + /// + /// Simple class to populate redis with some test data + /// + [TestFixture, Category("Integration")] + public class PopulateTestData + : RedisClientTestsBase + { + const string StringId = "urn:populatetest:string"; const string ListId = "urn:populatetest:list"; const string SetId = "urn:populatetest:set"; const string SortedSetId = "urn:populatetest:zset"; @@ -21,43 +21,43 @@ public PopulateTestData() CleanMask = "urn:populatetest:*"; } - private readonly List items = new List { "one", "two", "three", "four" }; - private readonly Dictionary map = new Dictionary { - {"A","one"}, - {"B","two"}, - {"C","three"}, - {"D","four"}, - }; - - [Test] - public void Populate_Strings() - { - items.ForEach(x => Redis.Set(StringId + ":" + x, x)); - } - - [Test] - public void Populate_List() - { - items.ForEach(x => Redis.AddItemToList(ListId, x)); - } - - [Test] - public void Populate_Set() - { - items.ForEach(x => Redis.AddItemToSet(SetId, x)); - } - - [Test] - public void Populate_SortedSet() - { - var i = 0; - items.ForEach(x => Redis.AddItemToSortedSet(SortedSetId, x, i++)); - } - - [Test] - public void Populate_Hash() - { - Redis.SetRangeInHash(HashId, map); - } - } + private readonly List items = new List { "one", "two", "three", "four" }; + private readonly Dictionary map = new Dictionary { + {"A","one"}, + {"B","two"}, + {"C","three"}, + {"D","four"}, + }; + + [Test] + public void Populate_Strings() + { + items.ForEach(x => Redis.Set(StringId + ":" + x, x)); + } + + [Test] + public void Populate_List() + { + items.ForEach(x => Redis.AddItemToList(ListId, x)); + } + + [Test] + public void Populate_Set() + { + items.ForEach(x => Redis.AddItemToSet(SetId, x)); + } + + [Test] + public void Populate_SortedSet() + { + var i = 0; + items.ForEach(x => Redis.AddItemToSortedSet(SortedSetId, x, i++)); + } + + [Test] + public void Populate_Hash() + { + Redis.SetRangeInHash(HashId, map); + } + } } \ No newline at end of file diff --git 
a/tests/ServiceStack.Redis.Tests/TrackThreadTests.cs b/tests/ServiceStack.Redis.Tests/TrackThreadTests.cs new file mode 100644 index 00000000..88ed49aa --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/TrackThreadTests.cs @@ -0,0 +1,98 @@ +using System; +using System.Threading; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + public class TrackThreadTests + { + [Test] + public void Does_throw_when_using_same_client_on_different_threads() + { + RedisConfig.AssertAccessOnlyOnSameThread = true; + InvalidAccessException poolEx = null; + + var redisManager = new RedisManagerPool(); + + using (var redis = redisManager.GetClient()) + { + var threadId = Thread.CurrentThread.ManagedThreadId.ToString(); + var key = $"Thread#{threadId}"; + redis.SetValue(key, threadId); + + ThreadPool.QueueUserWorkItem(_ => + { + using (var poolRedis = redisManager.GetClient()) + { + var poolThreadId = Thread.CurrentThread.ManagedThreadId.ToString(); + var poolKey = $"Thread#{poolThreadId}"; + poolRedis.SetValue(poolKey , poolThreadId); + + Console.WriteLine("From Pool: " + poolRedis.GetValue(poolKey)); + + try + { + Console.WriteLine("From Pool (using TEST): " + redis.GetValue(poolKey)); + } + catch (InvalidAccessException ex) + { + poolEx = ex; + } + } + }); + + Thread.Sleep(100); + + Console.WriteLine("From Test: " + redis.GetValue(key)); + + if (poolEx == null) + throw new Exception("Should throw InvalidAccessException"); + + Console.WriteLine("InvalidAccessException: " + poolEx.Message); + } + + RedisConfig.AssertAccessOnlyOnSameThread = false; + } + + [Test] + public void Does_not_throw_when_using_different_clients_on_same_Thread() + { + RedisConfig.AssertAccessOnlyOnSameThread = true; + InvalidAccessException poolEx = null; + + var redisManager = new RedisManagerPool(); + + using (var redis = redisManager.GetClient()) + { + var threadId = Thread.CurrentThread.ManagedThreadId.ToString(); + var key = $"Thread#{threadId}"; + redis.SetValue(key, threadId); + + ThreadPool.QueueUserWorkItem(_ => + { + try + { + using (var poolRedis = redisManager.GetClient()) + { + var poolThreadId = Thread.CurrentThread.ManagedThreadId.ToString(); + var poolKey = $"Thread#{poolThreadId}"; + poolRedis.SetValue(poolKey , poolThreadId); + + Console.WriteLine("From Pool: " + poolRedis.GetValue(poolKey )); + } + } + catch (InvalidAccessException ex) + { + poolEx = ex; + } + }); + + Thread.Sleep(100); + + Console.WriteLine("From Test: " + redis.GetValue(key)); + } + + RedisConfig.AssertAccessOnlyOnSameThread = false; + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/TwemproxyTests.cs b/tests/ServiceStack.Redis.Tests/TwemproxyTests.cs new file mode 100644 index 00000000..a69f6e9d --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/TwemproxyTests.cs @@ -0,0 +1,23 @@ +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Ignore("Integration")] + public class TwemproxyTests + { + [Test] + public void Can_connect_to_twemproxy() + { + var redis = new RedisClient("10.0.0.14", 22121) + { + //ServerVersionNumber = 2611 + }; + //var redis = new RedisClient("10.0.0.14"); + redis.SetValue("foo", "bar"); + var foo = redis.GetValue("foo"); + + Assert.That(foo, Is.EqualTo("bar")); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.Async.cs new file mode 100644 index 00000000..f6736364 --- /dev/null +++ 
b/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.Async.cs @@ -0,0 +1,436 @@ +using NUnit.Framework; +using ServiceStack.Caching; +using ServiceStack.Logging; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration"), Category("Async")] + public class UserSessionTestsAsync + { + static UserSessionTestsAsync() + { + LogManager.LogFactory = new ConsoleLogFactory(); + } + + //MasterUser master; + + static readonly Guid UserClientGlobalId1 = new Guid("71A30DE3-D7AF-4B8E-BCA2-AB646EE1F3E9"); + static readonly Guid UserClientGlobalId2 = new Guid("A8D300CF-0414-4C99-A495-A7F34C93CDE1"); + static readonly string UserClientKey = new Guid("10B7D0F7-4D4E-4676-AAC7-CF0234E9133E").ToString("N"); + static readonly Guid UserId = new Guid("5697B030-A369-43A2-A842-27303A0A62BC"); + private const string UserName = "User1"; + private const string ShardId = "0"; + + readonly UserClientSession session = new UserClientSession( + Guid.NewGuid(), UserId, "192.168.0.1", UserClientKey, UserClientGlobalId1); + + private RedisClient redisCache; + + [SetUp] + public void OnBeforeEachTest() + { + redisCache = new RedisClient(TestConfig.SingleHost); + redisCache.FlushAll(); + //master = UserMasterDataAccessModel.Instance.MasterUsers.NewDataAccessObject(true); + } + + public CachedUserSessionManagerAsync GetCacheManager(ICacheClientAsync cacheClient) + { + return new CachedUserSessionManagerAsync(cacheClient); + } + + private static void AssertClientSessionsAreEqual( + UserClientSession clientSession, UserClientSession resolvedClientSession) + { + Assert.That(resolvedClientSession.Id, Is.EqualTo(clientSession.Id)); + Assert.That(resolvedClientSession.Base64ClientModulus, Is.EqualTo(clientSession.Base64ClientModulus)); + Assert.That(resolvedClientSession.IPAddress, Is.EqualTo(clientSession.IPAddress)); + Assert.That(resolvedClientSession.UserClientGlobalId, Is.EqualTo(clientSession.UserClientGlobalId)); + Assert.That(resolvedClientSession.UserId, Is.EqualTo(clientSession.UserId)); + } + + [Test] + public async Task Can_add_single_UserSession() + { + var cacheManager = GetCacheManager(redisCache); + + var clientSession = await cacheManager.StoreClientSessionAsync( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId1); + + var resolvedClientSession = await cacheManager.GetUserClientSessionAsync( + clientSession.UserId, clientSession.Id); + + AssertClientSessionsAreEqual(clientSession, resolvedClientSession); + } + + [Test] + public async Task Can_add_multiple_UserClientSessions() + { + var cacheManager = GetCacheManager(redisCache); + + var clientSession1 = await cacheManager.StoreClientSessionAsync( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId1); + + var clientSession2 = await cacheManager.StoreClientSessionAsync( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId2); + + var resolvedClientSession1 = await cacheManager.GetUserClientSessionAsync( + clientSession1.UserId, clientSession1.Id); + + var resolvedClientSession2 = await cacheManager.GetUserClientSessionAsync( + clientSession2.UserId, clientSession2.Id); + + AssertClientSessionsAreEqual(clientSession1, resolvedClientSession1); + AssertClientSessionsAreEqual(clientSession2, resolvedClientSession2); + } + + [Test] + public async Task Does_remove_UserClientSession() + { + var 
cacheManager = GetCacheManager(redisCache); + + var clientSession1 = await cacheManager.StoreClientSessionAsync( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId1); + + var userSession = await cacheManager.GetUserSessionAsync(UserId); + var resolvedClientSession1 = userSession.GetClientSession(clientSession1.Id); + AssertClientSessionsAreEqual(resolvedClientSession1, clientSession1); + + resolvedClientSession1.ExpiryDate = DateTime.UtcNow.AddSeconds(-1); + await cacheManager.UpdateUserSessionAsync(userSession); + + userSession = await cacheManager.GetUserSessionAsync(UserId); + Assert.That(userSession, Is.Null); + } + + } + + public class CachedUserSessionManagerAsync + { + private static readonly ILog Log = LogManager.GetLogger(typeof(CachedUserSessionManager)); + + /// + /// Google/Yahoo seems to make you to login every 2 weeks?? + /// + private readonly ICacheClientAsync cacheClient; + + /// + /// Big perf hit if we Log on every session change + /// + /// The FMT. + /// The args. + [Conditional("DEBUG")] + protected void LogIfDebug(string fmt, params object[] args) + { + if (args.Length > 0) + Log.DebugFormat(fmt, args); + else + Log.Debug(fmt); + } + + public CachedUserSessionManagerAsync(ICacheClientAsync cacheClient) + { + this.cacheClient = cacheClient; + } + + /// + /// Removes the client session. + /// + /// The user global id. + /// The client session ids. + public async ValueTask RemoveClientSession(Guid userId, ICollection clientSessionIds) + { + var userSession = await this.GetUserSessionAsync(userId); + if (userSession == null) return; + + foreach (var clientSessionId in clientSessionIds) + { + userSession.RemoveClientSession(clientSessionId); + } + await this.UpdateUserSessionAsync(userSession); + } + + /// + /// Adds a new client session. + /// Should this be changed to GetOrCreateClientSession? + /// + /// The user global id. + /// Title of the user. + /// + /// The ip address. + /// The base64 client modulus. + /// The user client global id. + /// + public async ValueTask StoreClientSessionAsync(Guid userId, string userName, string shardId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + var userSession = await this.GetOrCreateSessionAsync(userId, userName, shardId); + + var existingClientSession = userSession.GetClientSessionWithClientId(userClientGlobalId); + if (existingClientSession != null) + { + userSession.RemoveClientSession(existingClientSession.Id); + } + + var newClientSession = userSession.CreateNewClientSession( + ipAddress, base64ClientModulus, userClientGlobalId); + + await this.UpdateUserSessionAsync(userSession); + + return newClientSession; + } + + /// + /// Updates the UserSession in the cache, or removes expired ones. + /// + /// The user session. + public async ValueTask UpdateUserSessionAsync(UserSessionAsync userSession) + { + var hasSessionExpired = userSession.HasExpired(); + if (hasSessionExpired) + { + LogIfDebug("Session has expired, removing: " + userSession.ToCacheKey()); + await this.cacheClient.RemoveAsync(userSession.ToCacheKey()); + } + else + { + LogIfDebug("Updating session: " + userSession.ToCacheKey()); + await this.cacheClient.ReplaceAsync(userSession.ToCacheKey(), userSession, userSession.ExpiryDate.Value); + } + } + + /// + /// Gets the user session if it exists or null. + /// + /// The user global id. 
+ /// + public async ValueTask<UserSessionAsync> GetUserSessionAsync(Guid userId) + { + var cacheKey = UserSession.ToCacheKey(userId); + var bytes = await this.cacheClient.GetAsync<byte[]>(cacheKey); + if (bytes != null) + { + var modelStr = Encoding.UTF8.GetString(bytes); + LogIfDebug("UserSession => " + modelStr); + } + return await this.cacheClient.GetAsync<UserSessionAsync>(cacheKey); + } + + /// + /// Gets or create a user session if one doesn't exist. + /// + /// The user global id. + /// Title of the user. + /// + /// + public async ValueTask<UserSessionAsync> GetOrCreateSessionAsync(Guid userId, string userName, string shardId) + { + var userSession = await this.GetUserSessionAsync(userId); + if (userSession == null) + { + userSession = new UserSessionAsync(userId, userName, shardId); + + await this.cacheClient.AddAsync(userSession.ToCacheKey(), userSession, + userSession.ExpiryDate.GetValueOrDefault(DateTime.UtcNow) + TimeSpan.FromHours(1)); + } + return userSession; + } + + /// + /// Gets the user client session identified by the id if exists otherwise null. + /// + /// The user global id. + /// The client session id. + /// + public async ValueTask<UserClientSession> GetUserClientSessionAsync(Guid userId, Guid clientSessionId) + { + var userSession = await this.GetUserSessionAsync(userId); + return userSession != null ? userSession.GetClientSession(clientSessionId) : null; + } + } + +#if !NETCORE + [Serializable /* was required when storing in memcached, not required in Redis */] +#endif + public class UserSessionAsync + { + //Empty constructor required for TypeSerializer + public UserSessionAsync() + { + this.PublicClientSessions = new Dictionary<Guid, UserClientSession>(); + } + + public Guid UserId { get; private set; } + + public string UserName { get; private set; } + + public string ShardId { get; private set; } + + public Dictionary<Guid, UserClientSession> PublicClientSessions { get; private set; } + + public UserSessionAsync(Guid userId, string userName, string shardId) + : this() + { + this.UserId = userId; + this.UserName = userName; + this.ShardId = shardId; + } + + /// + /// Gets the max expiry date of all the users client sessions. + /// If the user has no more active client sessions we can remove them from the cache. + /// + /// The expiry date. + public DateTime? ExpiryDate + { + get + { + DateTime? maxExpiryDate = null; + + foreach (var session in this.PublicClientSessions.Values) + { + if (maxExpiryDate == null || session.ExpiryDate > maxExpiryDate) + { + maxExpiryDate = session.ExpiryDate; + } + } + return maxExpiryDate; + } + } + + /// + /// Creates a new client session for the user. + /// + /// The ip address. + /// The base64 client modulus. + /// The user client global id. + /// + public UserClientSession CreateNewClientSession(string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + return this.CreateClientSession(Guid.NewGuid(), ipAddress, base64ClientModulus, userClientGlobalId); + } + + public UserClientSession CreateClientSession(Guid sessionId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + var clientSession = new UserClientSession( + sessionId, this.UserId, ipAddress, base64ClientModulus, userClientGlobalId); + + this.PublicClientSessions[clientSession.Id] = clientSession; + + return clientSession; + } + + /// + /// Removes the client session. + /// + /// The client session id.
+ public void RemoveClientSession(Guid clientSessionId) + { + if (this.PublicClientSessions.ContainsKey(clientSessionId)) + { + this.PublicClientSessions.Remove(clientSessionId); + } + } + + public UserClientSession GetClientSessionWithClientId(Guid userClientId) + { + foreach (var entry in this.PublicClientSessions) + { + if (entry.Value.UserClientGlobalId == userClientId) + { + return entry.Value; + } + } + + return null; + } + + /// + /// Verifies this UserSession, removing any expired sessions. + /// Returns true to keep the UserSession in the cache. + /// + /// + /// true if this session has any active client sessions; otherwise, false. + /// + public bool HasExpired() + { + RemoveExpiredSessions(this.PublicClientSessions); + + //If there are no more active client sessions we can remove the entire UserSessions + var sessionHasExpired = + this.ExpiryDate == null //There are no UserClientSessions + || this.ExpiryDate.Value <= DateTime.UtcNow; //The max UserClientSession ExpiryDate has expired + + return sessionHasExpired; + } + + private static void RemoveExpiredSessions(IDictionary clientSessions) + { + var expiredSessionKeys = new List(); + + foreach (var clientSession in clientSessions) + { + if (clientSession.Value.ExpiryDate < DateTime.UtcNow) + { + expiredSessionKeys.Add(clientSession.Key); + } + } + + foreach (var sessionKey in expiredSessionKeys) + { + clientSessions.Remove(sessionKey); + } + } + + public void RemoveAllSessions() + { + this.PublicClientSessions.Clear(); + } + + public UserClientSession GetClientSession(Guid clientSessionId) + { + UserClientSession session; + + if (this.PublicClientSessions.TryGetValue(clientSessionId, out session)) + { + return session; + } + + return null; + } + + public string ToCacheKey() + { + return ToCacheKey(this.UserId); + } + + public static string ToCacheKey(Guid userId) + { + return UrnId.Create(userId.ToString()); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.cs b/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.cs index 4d837946..10ba3109 100644 --- a/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.cs @@ -3,463 +3,464 @@ using System.Diagnostics; using System.Text; using NUnit.Framework; -using ServiceStack.CacheAccess; -using ServiceStack.Common; -using ServiceStack.DesignPatterns.Model; +using ServiceStack.Caching; +using ServiceStack.Model; using ServiceStack.Logging; -using ServiceStack.Logging.Support.Logging; namespace ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class UserSessionTests - { - static UserSessionTests() - { - LogManager.LogFactory = new ConsoleLogFactory(); - } - - //MasterUser master; - - static readonly Guid UserClientGlobalId1 = new Guid("71A30DE3-D7AF-4B8E-BCA2-AB646EE1F3E9"); - static readonly Guid UserClientGlobalId2 = new Guid("A8D300CF-0414-4C99-A495-A7F34C93CDE1"); - static readonly string UserClientKey = new Guid("10B7D0F7-4D4E-4676-AAC7-CF0234E9133E").ToString("N"); - static readonly Guid UserId = new Guid("5697B030-A369-43A2-A842-27303A0A62BC"); - private const string UserName = "User1"; - private const string ShardId = "0"; - - readonly UserClientSession session = new UserClientSession( - Guid.NewGuid(), UserId, "192.168.0.1", UserClientKey, UserClientGlobalId1); - - private RedisClient redisCache; - - [SetUp] - public void OnBeforeEachTest() - { - redisCache = new RedisClient(TestConfig.SingleHost); - 
redisCache.FlushAll(); - //master = UserMasterDataAccessModel.Instance.MasterUsers.NewDataAccessObject(true); - } - - public CachedUserSessionManager GetCacheManager(ICacheClient cacheClient) - { - return new CachedUserSessionManager(cacheClient); - } - - private static void AssertClientSessionsAreEqual( - UserClientSession clientSession, UserClientSession resolvedClientSession) - { - Assert.That(resolvedClientSession.Id, Is.EqualTo(clientSession.Id)); - Assert.That(resolvedClientSession.Base64ClientModulus, Is.EqualTo(clientSession.Base64ClientModulus)); - Assert.That(resolvedClientSession.IPAddress, Is.EqualTo(clientSession.IPAddress)); - Assert.That(resolvedClientSession.UserClientGlobalId, Is.EqualTo(clientSession.UserClientGlobalId)); - Assert.That(resolvedClientSession.UserId, Is.EqualTo(clientSession.UserId)); - } - - [Test] - public void Can_add_single_UserSession() - { - var cacheManager = GetCacheManager(redisCache); - - var clientSession = cacheManager.StoreClientSession( - UserId, - UserName, - ShardId, - session.IPAddress, - UserClientKey, - UserClientGlobalId1); - - var resolvedClientSession = cacheManager.GetUserClientSession( - clientSession.UserId, clientSession.Id); - - AssertClientSessionsAreEqual(clientSession, resolvedClientSession); - } - - [Test] - public void Can_add_multiple_UserClientSessions() - { - var cacheManager = GetCacheManager(redisCache); - - var clientSession1 = cacheManager.StoreClientSession( - UserId, - UserName, - ShardId, - session.IPAddress, - UserClientKey, - UserClientGlobalId1); - - var clientSession2 = cacheManager.StoreClientSession( - UserId, - UserName, - ShardId, - session.IPAddress, - UserClientKey, - UserClientGlobalId2); - - var resolvedClientSession1 = cacheManager.GetUserClientSession( - clientSession1.UserId, clientSession1.Id); - - var resolvedClientSession2 = cacheManager.GetUserClientSession( - clientSession2.UserId, clientSession2.Id); - - AssertClientSessionsAreEqual(clientSession1, resolvedClientSession1); - AssertClientSessionsAreEqual(clientSession2, resolvedClientSession2); - } - - [Test] - public void Does_remove_UserClientSession() - { - var cacheManager = GetCacheManager(redisCache); - - var clientSession1 = cacheManager.StoreClientSession( - UserId, - UserName, - ShardId, - session.IPAddress, - UserClientKey, - UserClientGlobalId1); - - var userSession = cacheManager.GetUserSession(UserId); - var resolvedClientSession1 = userSession.GetClientSession(clientSession1.Id); - AssertClientSessionsAreEqual(resolvedClientSession1, clientSession1); - - resolvedClientSession1.ExpiryDate = DateTime.UtcNow.AddSeconds(-1); - cacheManager.UpdateUserSession(userSession); - - userSession = cacheManager.GetUserSession(UserId); - Assert.That(userSession, Is.Null); - } - - } - - public class CachedUserSessionManager - { - private static readonly ILog Log = LogManager.GetLogger(typeof(CachedUserSessionManager)); - - /// - /// Google/Yahoo seems to make you to login every 2 weeks?? - /// - private readonly ICacheClient cacheClient; - - /// - /// Big perf hit if we Log on every session change - /// - /// The FMT. - /// The args. - [Conditional("DEBUG")] - protected void LogIfDebug(string fmt, params object[] args) - { - if (args.Length > 0) - Log.DebugFormat(fmt, args); - else - Log.Debug(fmt); - } - - public CachedUserSessionManager(ICacheClient cacheClient) - { - this.cacheClient = cacheClient; - } - - /// - /// Removes the client session. - /// - /// The user global id. - /// The client session ids. 
- public void RemoveClientSession(Guid userId, ICollection clientSessionIds) - { - var userSession = this.GetUserSession(userId); - if (userSession == null) return; - - foreach (var clientSessionId in clientSessionIds) - { - userSession.RemoveClientSession(clientSessionId); - } - this.UpdateUserSession(userSession); - } - - /// - /// Adds a new client session. - /// Should this be changed to GetOrCreateClientSession? - /// - /// The user global id. - /// Title of the user. - /// - /// The ip address. - /// The base64 client modulus. - /// The user client global id. - /// - public UserClientSession StoreClientSession(Guid userId, string userName, string shardId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) - { - var userSession = this.GetOrCreateSession(userId, userName, shardId); - - var existingClientSession = userSession.GetClientSessionWithClientId(userClientGlobalId); - if (existingClientSession != null) - { - userSession.RemoveClientSession(existingClientSession.Id); - } - - var newClientSession = userSession.CreateNewClientSession( - ipAddress, base64ClientModulus, userClientGlobalId); - - this.UpdateUserSession(userSession); - - return newClientSession; - } - - /// - /// Updates the UserSession in the cache, or removes expired ones. - /// - /// The user session. - public void UpdateUserSession(UserSession userSession) - { - var hasSessionExpired = userSession.HasExpired(); - if (hasSessionExpired) - { - LogIfDebug("Session has expired, removing: " + userSession.ToCacheKey()); - this.cacheClient.Remove(userSession.ToCacheKey()); - } - else - { - LogIfDebug("Updating session: " + userSession.ToCacheKey()); - this.cacheClient.Replace(userSession.ToCacheKey(), userSession, userSession.ExpiryDate.Value); - } - } - - /// - /// Gets the user session if it exists or null. - /// - /// The user global id. - /// - public UserSession GetUserSession(Guid userId) - { - var cacheKey = UserSession.ToCacheKey(userId); - var bytes = this.cacheClient.Get(cacheKey); - if (bytes != null) - { - var modelStr = Encoding.UTF8.GetString(bytes); - LogIfDebug("UserSession => " + modelStr); - } - return this.cacheClient.Get(cacheKey); - } - - /// - /// Gets or create a user session if one doesn't exist. - /// - /// The user global id. - /// Title of the user. - /// - /// - public UserSession GetOrCreateSession(Guid userId, string userName, string shardId) - { - var userSession = this.GetUserSession(userId); - if (userSession == null) - { - userSession = new UserSession(userId, userName, shardId); - - this.cacheClient.Add(userSession.ToCacheKey(), userSession, - userSession.ExpiryDate.GetValueOrDefault(DateTime.UtcNow) + TimeSpan.FromHours(1)); - } - return userSession; - } - - /// - /// Gets the user client session identified by the id if exists otherwise null. - /// - /// The user global id. - /// The client session id. - /// - public UserClientSession GetUserClientSession(Guid userId, Guid clientSessionId) - { - var userSession = this.GetUserSession(userId); - return userSession != null ? 
userSession.GetClientSession(clientSessionId) : null; - } - } - - - [Serializable /* was required when storing in memcached, not required in Redis */] - public class UserSession - { - //Empty constructor required for TypeSerializer - public UserSession() - { - this.PublicClientSessions = new Dictionary(); - } - - public Guid UserId { get; private set; } - - public string UserName { get; private set; } - - public string ShardId { get; private set; } - - public Dictionary PublicClientSessions { get; private set; } - - public UserSession(Guid userId, string userName, string shardId) - : this() - { - this.UserId = userId; - this.UserName = userName; - this.ShardId = shardId; - } - - /// - /// Gets the max expiry date of all the users client sessions. - /// If the user has no more active client sessions we can remove them from the cache. - /// - /// The expiry date. - public DateTime? ExpiryDate - { - get - { - DateTime? maxExpiryDate = null; - - foreach (var session in this.PublicClientSessions.Values) - { - if (maxExpiryDate == null || session.ExpiryDate > maxExpiryDate) - { - maxExpiryDate = session.ExpiryDate; - } - } - return maxExpiryDate; - } - } - - /// - /// Creates a new client session for the user. - /// - /// The ip address. - /// The base64 client modulus. - /// The user client global id. - /// - public UserClientSession CreateNewClientSession(string ipAddress, string base64ClientModulus, Guid userClientGlobalId) - { - return this.CreateClientSession(Guid.NewGuid(), ipAddress, base64ClientModulus, userClientGlobalId); - } - - public UserClientSession CreateClientSession(Guid sessionId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) - { - var clientSession = new UserClientSession( - sessionId, this.UserId, ipAddress, base64ClientModulus, userClientGlobalId); - - this.PublicClientSessions[clientSession.Id] = clientSession; - - return clientSession; - } - - /// - /// Removes the client session. - /// - /// The client session id. - public void RemoveClientSession(Guid clientSessionId) - { - if (this.PublicClientSessions.ContainsKey(clientSessionId)) - { - this.PublicClientSessions.Remove(clientSessionId); - } - } - - public UserClientSession GetClientSessionWithClientId(Guid userClientId) - { - foreach (var entry in this.PublicClientSessions) - { - if (entry.Value.UserClientGlobalId == userClientId) - { - return entry.Value; - } - } - - return null; - } - - /// - /// Verifies this UserSession, removing any expired sessions. - /// Returns true to keep the UserSession in the cache. - /// - /// - /// true if this session has any active client sessions; otherwise, false. 
- /// - public bool HasExpired() - { - RemoveExpiredSessions(this.PublicClientSessions); - - //If there are no more active client sessions we can remove the entire UserSessions - var sessionHasExpired = - this.ExpiryDate == null //There are no UserClientSessions - || this.ExpiryDate.Value <= DateTime.UtcNow; //The max UserClientSession ExpiryDate has expired - - return sessionHasExpired; - } - - private static void RemoveExpiredSessions(IDictionary clientSessions) - { - var expiredSessionKeys = new List(); - - foreach (var clientSession in clientSessions) - { - if (clientSession.Value.ExpiryDate < DateTime.UtcNow) - { - expiredSessionKeys.Add(clientSession.Key); - } - } - - foreach (var sessionKey in expiredSessionKeys) - { - clientSessions.Remove(sessionKey); - } - } - - public void RemoveAllSessions() - { - this.PublicClientSessions.Clear(); - } - - public UserClientSession GetClientSession(Guid clientSessionId) - { - UserClientSession session; - - if (this.PublicClientSessions.TryGetValue(clientSessionId, out session)) - { - return session; - } - - return null; - } - - public string ToCacheKey() - { - return ToCacheKey(this.UserId); - } - - public static string ToCacheKey(Guid userId) - { - return UrnId.Create(userId.ToString()); - } - } - - [Serializable] - public class UserClientSession - : IHasGuidId - { - private const int ValidForTwoWeeks = 14; - public string IPAddress { get; private set; } - public DateTime ExpiryDate { get; set; } - - //Empty constructor required for TypeSerializer - public UserClientSession() { } - - public UserClientSession(Guid sessionId, Guid userId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) - { - this.Id = sessionId; - this.UserId = userId; - this.IPAddress = ipAddress; - this.Base64ClientModulus = base64ClientModulus; - this.UserClientGlobalId = userClientGlobalId; - this.ExpiryDate = DateTime.UtcNow.AddDays(ValidForTwoWeeks); - } - - public Guid Id { get; set; } - public Guid UserId { get; set; } - public string Base64ClientModulus { get; set; } - public Guid UserClientGlobalId { get; set; } - } + [TestFixture, Category("Integration")] + public class UserSessionTests + { + static UserSessionTests() + { + LogManager.LogFactory = new ConsoleLogFactory(); + } + + //MasterUser master; + + static readonly Guid UserClientGlobalId1 = new Guid("71A30DE3-D7AF-4B8E-BCA2-AB646EE1F3E9"); + static readonly Guid UserClientGlobalId2 = new Guid("A8D300CF-0414-4C99-A495-A7F34C93CDE1"); + static readonly string UserClientKey = new Guid("10B7D0F7-4D4E-4676-AAC7-CF0234E9133E").ToString("N"); + static readonly Guid UserId = new Guid("5697B030-A369-43A2-A842-27303A0A62BC"); + private const string UserName = "User1"; + private const string ShardId = "0"; + + readonly UserClientSession session = new UserClientSession( + Guid.NewGuid(), UserId, "192.168.0.1", UserClientKey, UserClientGlobalId1); + + private RedisClient redisCache; + + [SetUp] + public void OnBeforeEachTest() + { + redisCache = new RedisClient(TestConfig.SingleHost); + redisCache.FlushAll(); + //master = UserMasterDataAccessModel.Instance.MasterUsers.NewDataAccessObject(true); + } + + public CachedUserSessionManager GetCacheManager(ICacheClient cacheClient) + { + return new CachedUserSessionManager(cacheClient); + } + + private static void AssertClientSessionsAreEqual( + UserClientSession clientSession, UserClientSession resolvedClientSession) + { + Assert.That(resolvedClientSession.Id, Is.EqualTo(clientSession.Id)); + Assert.That(resolvedClientSession.Base64ClientModulus, 
Is.EqualTo(clientSession.Base64ClientModulus)); + Assert.That(resolvedClientSession.IPAddress, Is.EqualTo(clientSession.IPAddress)); + Assert.That(resolvedClientSession.UserClientGlobalId, Is.EqualTo(clientSession.UserClientGlobalId)); + Assert.That(resolvedClientSession.UserId, Is.EqualTo(clientSession.UserId)); + } + + [Test] + public void Can_add_single_UserSession() + { + var cacheManager = GetCacheManager(redisCache); + + var clientSession = cacheManager.StoreClientSession( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId1); + + var resolvedClientSession = cacheManager.GetUserClientSession( + clientSession.UserId, clientSession.Id); + + AssertClientSessionsAreEqual(clientSession, resolvedClientSession); + } + + [Test] + public void Can_add_multiple_UserClientSessions() + { + var cacheManager = GetCacheManager(redisCache); + + var clientSession1 = cacheManager.StoreClientSession( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId1); + + var clientSession2 = cacheManager.StoreClientSession( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId2); + + var resolvedClientSession1 = cacheManager.GetUserClientSession( + clientSession1.UserId, clientSession1.Id); + + var resolvedClientSession2 = cacheManager.GetUserClientSession( + clientSession2.UserId, clientSession2.Id); + + AssertClientSessionsAreEqual(clientSession1, resolvedClientSession1); + AssertClientSessionsAreEqual(clientSession2, resolvedClientSession2); + } + + [Test] + public void Does_remove_UserClientSession() + { + var cacheManager = GetCacheManager(redisCache); + + var clientSession1 = cacheManager.StoreClientSession( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId1); + + var userSession = cacheManager.GetUserSession(UserId); + var resolvedClientSession1 = userSession.GetClientSession(clientSession1.Id); + AssertClientSessionsAreEqual(resolvedClientSession1, clientSession1); + + resolvedClientSession1.ExpiryDate = DateTime.UtcNow.AddSeconds(-1); + cacheManager.UpdateUserSession(userSession); + + userSession = cacheManager.GetUserSession(UserId); + Assert.That(userSession, Is.Null); + } + + } + + public class CachedUserSessionManager + { + private static readonly ILog Log = LogManager.GetLogger(typeof(CachedUserSessionManager)); + + /// + /// Google/Yahoo seems to make you to login every 2 weeks?? + /// + private readonly ICacheClient cacheClient; + + /// + /// Big perf hit if we Log on every session change + /// + /// The FMT. + /// The args. + [Conditional("DEBUG")] + protected void LogIfDebug(string fmt, params object[] args) + { + if (args.Length > 0) + Log.DebugFormat(fmt, args); + else + Log.Debug(fmt); + } + + public CachedUserSessionManager(ICacheClient cacheClient) + { + this.cacheClient = cacheClient; + } + + /// + /// Removes the client session. + /// + /// The user global id. + /// The client session ids. + public void RemoveClientSession(Guid userId, ICollection clientSessionIds) + { + var userSession = this.GetUserSession(userId); + if (userSession == null) return; + + foreach (var clientSessionId in clientSessionIds) + { + userSession.RemoveClientSession(clientSessionId); + } + this.UpdateUserSession(userSession); + } + + /// + /// Adds a new client session. + /// Should this be changed to GetOrCreateClientSession? + /// + /// The user global id. + /// Title of the user. + /// + /// The ip address. + /// The base64 client modulus. 
+ /// The user client global id. + /// + public UserClientSession StoreClientSession(Guid userId, string userName, string shardId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + var userSession = this.GetOrCreateSession(userId, userName, shardId); + + var existingClientSession = userSession.GetClientSessionWithClientId(userClientGlobalId); + if (existingClientSession != null) + { + userSession.RemoveClientSession(existingClientSession.Id); + } + + var newClientSession = userSession.CreateNewClientSession( + ipAddress, base64ClientModulus, userClientGlobalId); + + this.UpdateUserSession(userSession); + + return newClientSession; + } + + /// + /// Updates the UserSession in the cache, or removes expired ones. + /// + /// The user session. + public void UpdateUserSession(UserSession userSession) + { + var hasSessionExpired = userSession.HasExpired(); + if (hasSessionExpired) + { + LogIfDebug("Session has expired, removing: " + userSession.ToCacheKey()); + this.cacheClient.Remove(userSession.ToCacheKey()); + } + else + { + LogIfDebug("Updating session: " + userSession.ToCacheKey()); + this.cacheClient.Replace(userSession.ToCacheKey(), userSession, userSession.ExpiryDate.Value); + } + } + + /// + /// Gets the user session if it exists or null. + /// + /// The user global id. + /// + public UserSession GetUserSession(Guid userId) + { + var cacheKey = UserSession.ToCacheKey(userId); + var bytes = this.cacheClient.Get(cacheKey); + if (bytes != null) + { + var modelStr = Encoding.UTF8.GetString(bytes); + LogIfDebug("UserSession => " + modelStr); + } + return this.cacheClient.Get(cacheKey); + } + + /// + /// Gets or create a user session if one doesn't exist. + /// + /// The user global id. + /// Title of the user. + /// + /// + public UserSession GetOrCreateSession(Guid userId, string userName, string shardId) + { + var userSession = this.GetUserSession(userId); + if (userSession == null) + { + userSession = new UserSession(userId, userName, shardId); + + this.cacheClient.Add(userSession.ToCacheKey(), userSession, + userSession.ExpiryDate.GetValueOrDefault(DateTime.UtcNow) + TimeSpan.FromHours(1)); + } + return userSession; + } + + /// + /// Gets the user client session identified by the id if exists otherwise null. + /// + /// The user global id. + /// The client session id. + /// + public UserClientSession GetUserClientSession(Guid userId, Guid clientSessionId) + { + var userSession = this.GetUserSession(userId); + return userSession != null ? userSession.GetClientSession(clientSessionId) : null; + } + } + +#if !NETCORE + [Serializable /* was required when storing in memcached, not required in Redis */] +#endif + public class UserSession + { + //Empty constructor required for TypeSerializer + public UserSession() + { + this.PublicClientSessions = new Dictionary(); + } + + public Guid UserId { get; private set; } + + public string UserName { get; private set; } + + public string ShardId { get; private set; } + + public Dictionary PublicClientSessions { get; private set; } + + public UserSession(Guid userId, string userName, string shardId) + : this() + { + this.UserId = userId; + this.UserName = userName; + this.ShardId = shardId; + } + + /// + /// Gets the max expiry date of all the users client sessions. + /// If the user has no more active client sessions we can remove them from the cache. + /// + /// The expiry date. + public DateTime? ExpiryDate + { + get + { + DateTime? 
maxExpiryDate = null; + + foreach (var session in this.PublicClientSessions.Values) + { + if (maxExpiryDate == null || session.ExpiryDate > maxExpiryDate) + { + maxExpiryDate = session.ExpiryDate; + } + } + return maxExpiryDate; + } + } + + /// + /// Creates a new client session for the user. + /// + /// The ip address. + /// The base64 client modulus. + /// The user client global id. + /// + public UserClientSession CreateNewClientSession(string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + return this.CreateClientSession(Guid.NewGuid(), ipAddress, base64ClientModulus, userClientGlobalId); + } + + public UserClientSession CreateClientSession(Guid sessionId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + var clientSession = new UserClientSession( + sessionId, this.UserId, ipAddress, base64ClientModulus, userClientGlobalId); + + this.PublicClientSessions[clientSession.Id] = clientSession; + + return clientSession; + } + + /// + /// Removes the client session. + /// + /// The client session id. + public void RemoveClientSession(Guid clientSessionId) + { + if (this.PublicClientSessions.ContainsKey(clientSessionId)) + { + this.PublicClientSessions.Remove(clientSessionId); + } + } + + public UserClientSession GetClientSessionWithClientId(Guid userClientId) + { + foreach (var entry in this.PublicClientSessions) + { + if (entry.Value.UserClientGlobalId == userClientId) + { + return entry.Value; + } + } + + return null; + } + + /// + /// Verifies this UserSession, removing any expired sessions. + /// Returns true to keep the UserSession in the cache. + /// + /// + /// true if this session has any active client sessions; otherwise, false. + /// + public bool HasExpired() + { + RemoveExpiredSessions(this.PublicClientSessions); + + //If there are no more active client sessions we can remove the entire UserSessions + var sessionHasExpired = + this.ExpiryDate == null //There are no UserClientSessions + || this.ExpiryDate.Value <= DateTime.UtcNow; //The max UserClientSession ExpiryDate has expired + + return sessionHasExpired; + } + + private static void RemoveExpiredSessions(IDictionary clientSessions) + { + var expiredSessionKeys = new List(); + + foreach (var clientSession in clientSessions) + { + if (clientSession.Value.ExpiryDate < DateTime.UtcNow) + { + expiredSessionKeys.Add(clientSession.Key); + } + } + + foreach (var sessionKey in expiredSessionKeys) + { + clientSessions.Remove(sessionKey); + } + } + + public void RemoveAllSessions() + { + this.PublicClientSessions.Clear(); + } + + public UserClientSession GetClientSession(Guid clientSessionId) + { + UserClientSession session; + + if (this.PublicClientSessions.TryGetValue(clientSessionId, out session)) + { + return session; + } + + return null; + } + + public string ToCacheKey() + { + return ToCacheKey(this.UserId); + } + + public static string ToCacheKey(Guid userId) + { + return UrnId.Create(userId.ToString()); + } + } + +#if !NETCORE + [Serializable] +#endif + public class UserClientSession + : IHasGuidId + { + private const int ValidForTwoWeeks = 14; + public string IPAddress { get; private set; } + public DateTime ExpiryDate { get; set; } + + //Empty constructor required for TypeSerializer + public UserClientSession() { } + + public UserClientSession(Guid sessionId, Guid userId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + this.Id = sessionId; + this.UserId = userId; + this.IPAddress = ipAddress; + this.Base64ClientModulus = base64ClientModulus; + 
this.UserClientGlobalId = userClientGlobalId; + this.ExpiryDate = DateTime.UtcNow.AddDays(ValidForTwoWeeks); + } + + public Guid Id { get; set; } + public Guid UserId { get; set; } + public string Base64ClientModulus { get; set; } + public Guid UserClientGlobalId { get; set; } + } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ValueTypeExamples.Async.cs b/tests/ServiceStack.Redis.Tests/ValueTypeExamples.Async.cs new file mode 100644 index 00000000..c4baa07f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/ValueTypeExamples.Async.cs @@ -0,0 +1,135 @@ +using NUnit.Framework; +using ServiceStack.Redis.Generic; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration"), Category("Async")] + public class ValueTypeExamplesAsync + { + [SetUp] + public async Task SetUp() + { + await using var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + await redisClient.FlushAllAsync(); + } + + [Test] + public async Task Working_with_int_values() + { + const string intKey = "intkey"; + const int intValue = 1; + + //STORING AN INT USING THE BASIC CLIENT + await using (var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly()) + { + await redisClient.SetValueAsync(intKey, intValue.ToString()); + string strGetIntValue = await redisClient.GetValueAsync(intKey); + int toIntValue = int.Parse(strGetIntValue); + + Assert.That(toIntValue, Is.EqualTo(intValue)); + } + + //STORING AN INT USING THE GENERIC CLIENT + await using (var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly()) + { + //Create a generic client that treats all values as ints: + IRedisTypedClientAsync intRedis = redisClient.As(); + + await intRedis.SetValueAsync(intKey, intValue); + var toIntValue = await intRedis.GetValueAsync(intKey); + + Assert.That(toIntValue, Is.EqualTo(intValue)); + } + } + + [Test] + public async Task Working_with_int_list_values() + { + const string intListKey = "intListKey"; + var intValues = new List { 2, 4, 6, 8 }; + + //STORING INTS INTO A LIST USING THE BASIC CLIENT + await using (var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly()) + { + IRedisListAsync strList = redisClient.Lists[intListKey]; + + //storing all int values in the redis list 'intListKey' as strings + await intValues.ForEachAsync(async x => await strList.AddAsync(x.ToString())); + + //retrieve all values again as strings + List strListValues = await strList.ToListAsync(); + + //convert back to list of ints + List toIntValues = strListValues.ConvertAll(x => int.Parse(x)); + + Assert.That(toIntValues, Is.EqualTo(intValues)); + + //delete all items in the list + await strList.ClearAsync(); + } + + //STORING INTS INTO A LIST USING THE GENERIC CLIENT + await using (var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly()) + { + //Create a generic client that treats all values as ints: + IRedisTypedClientAsync intRedis = redisClient.As(); + + IRedisListAsync intList = intRedis.Lists[intListKey]; + + //storing all int values in the redis list 'intListKey' as ints + await intValues.ForEachAsync(async x => await intList.AddAsync(x)); + + List toIntListValues = await intList.ToListAsync(); + + Assert.That(toIntListValues, Is.EqualTo(intValues)); + } + } + + public class IntAndString + { + public int Id { get; set; } + public string Letter { get; set; } + } + + [Test] + public async Task Working_with_Generic_types() + { + await using var redisClient = 
new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + //Create a typed Redis client that treats all values as IntAndString: + var typedRedis = redisClient.As(); + + var pocoValue = new IntAndString { Id = 1, Letter = "A" }; + await typedRedis.SetValueAsync("pocoKey", pocoValue); + IntAndString toPocoValue = await typedRedis.GetValueAsync("pocoKey"); + + Assert.That(toPocoValue.Id, Is.EqualTo(pocoValue.Id)); + Assert.That(toPocoValue.Letter, Is.EqualTo(pocoValue.Letter)); + + var pocoListValues = new List { + new IntAndString {Id = 2, Letter = "B"}, + new IntAndString {Id = 3, Letter = "C"}, + new IntAndString {Id = 4, Letter = "D"}, + new IntAndString {Id = 5, Letter = "E"}, + }; + + IRedisListAsync pocoList = typedRedis.Lists["pocoListKey"]; + + //Adding all IntAndString objects into the redis list 'pocoListKey' + await pocoListValues.ForEachAsync(async x => await pocoList.AddAsync(x)); + + List toPocoListValues = await pocoList.ToListAsync(); + + for (var i = 0; i < pocoListValues.Count; i++) + { + pocoValue = pocoListValues[i]; + toPocoValue = toPocoListValues[i]; + Assert.That(toPocoValue.Id, Is.EqualTo(pocoValue.Id)); + Assert.That(toPocoValue.Letter, Is.EqualTo(pocoValue.Letter)); + } + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ValueTypeExamples.cs b/tests/ServiceStack.Redis.Tests/ValueTypeExamples.cs index 888266b7..f0588d62 100644 --- a/tests/ServiceStack.Redis.Tests/ValueTypeExamples.cs +++ b/tests/ServiceStack.Redis.Tests/ValueTypeExamples.cs @@ -5,134 +5,134 @@ namespace ServiceStack.Redis.Tests { - [TestFixture, Category("Integration")] - public class ValueTypeExamples - { - [SetUp] - public void SetUp() - { - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - redisClient.FlushAll(); - } - } - - [Test] - public void Working_with_int_values() - { - const string intKey = "intkey"; - const int intValue = 1; - - //STORING AN INT USING THE BASIC CLIENT - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - redisClient.SetEntry(intKey, intValue.ToString()); - string strGetIntValue = redisClient.GetValue(intKey); - int toIntValue = int.Parse(strGetIntValue); - - Assert.That(toIntValue, Is.EqualTo(intValue)); - } - - //STORING AN INT USING THE GENERIC CLIENT - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - //Create a generic client that treats all values as ints: - IRedisTypedClient intRedis = redisClient.GetTypedClient(); - - intRedis.SetEntry(intKey, intValue); - var toIntValue = intRedis.GetValue(intKey); - - Assert.That(toIntValue, Is.EqualTo(intValue)); - } - } - - [Test] - public void Working_with_int_list_values() - { - const string intListKey = "intListKey"; - var intValues = new List { 2, 4, 6, 8 }; - - //STORING INTS INTO A LIST USING THE BASIC CLIENT - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - IList strList = redisClient.Lists[intListKey]; - - //storing all int values in the redis list 'intListKey' as strings - intValues.ForEach(x => strList.Add(x.ToString())); - - //retrieve all values again as strings - List strListValues = strList.ToList(); - - //convert back to list of ints - List toIntValues = strListValues.ConvertAll(x => int.Parse(x)); - - Assert.That(toIntValues, Is.EqualTo(intValues)); - - //delete all items in the list - strList.Clear(); - } - - //STORING INTS INTO A LIST USING THE GENERIC CLIENT - using (var redisClient = new RedisClient(TestConfig.SingleHost)) - { - //Create a generic client that treats all values as ints: - 
IRedisTypedClient intRedis = redisClient.GetTypedClient(); - - IRedisList intList = intRedis.Lists[intListKey]; - - //storing all int values in the redis list 'intListKey' as ints - intValues.ForEach(x => intList.Add(x)); - - List toIntListValues = intList.ToList(); - - Assert.That(toIntListValues, Is.EqualTo(intValues)); - } - } - - public class IntAndString - { - public int Id { get; set; } - public string Letter { get; set; } - } - - [Test] - public void Working_with_Generic_types() - { - using (var redisClient = new RedisClient()) - { - //Create a typed Redis client that treats all values as IntAndString: - var typedRedis = redisClient.GetTypedClient(); - - var pocoValue = new IntAndString { Id = 1, Letter = "A" }; - typedRedis.SetEntry("pocoKey", pocoValue); - IntAndString toPocoValue = typedRedis.GetValue("pocoKey"); - - Assert.That(toPocoValue.Id, Is.EqualTo(pocoValue.Id)); - Assert.That(toPocoValue.Letter, Is.EqualTo(pocoValue.Letter)); - - var pocoListValues = new List { - new IntAndString {Id = 2, Letter = "B"}, - new IntAndString {Id = 3, Letter = "C"}, - new IntAndString {Id = 4, Letter = "D"}, - new IntAndString {Id = 5, Letter = "E"}, - }; - - IRedisList pocoList = typedRedis.Lists["pocoListKey"]; - - //Adding all IntAndString objects into the redis list 'pocoListKey' - pocoListValues.ForEach(x => pocoList.Add(x)); - - List toPocoListValues = pocoList.ToList(); - - for (var i = 0; i < pocoListValues.Count; i++) - { - pocoValue = pocoListValues[i]; - toPocoValue = toPocoListValues[i]; - Assert.That(toPocoValue.Id, Is.EqualTo(pocoValue.Id)); - Assert.That(toPocoValue.Letter, Is.EqualTo(pocoValue.Letter)); - } - } - } - - } + [TestFixture, Category("Integration")] + public class ValueTypeExamples + { + [SetUp] + public void SetUp() + { + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + redisClient.FlushAll(); + } + } + + [Test] + public void Working_with_int_values() + { + const string intKey = "intkey"; + const int intValue = 1; + + //STORING AN INT USING THE BASIC CLIENT + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + redisClient.SetValue(intKey, intValue.ToString()); + string strGetIntValue = redisClient.GetValue(intKey); + int toIntValue = int.Parse(strGetIntValue); + + Assert.That(toIntValue, Is.EqualTo(intValue)); + } + + //STORING AN INT USING THE GENERIC CLIENT + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + //Create a generic client that treats all values as ints: + IRedisTypedClient intRedis = redisClient.As(); + + intRedis.SetValue(intKey, intValue); + var toIntValue = intRedis.GetValue(intKey); + + Assert.That(toIntValue, Is.EqualTo(intValue)); + } + } + + [Test] + public void Working_with_int_list_values() + { + const string intListKey = "intListKey"; + var intValues = new List { 2, 4, 6, 8 }; + + //STORING INTS INTO A LIST USING THE BASIC CLIENT + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + IList strList = redisClient.Lists[intListKey]; + + //storing all int values in the redis list 'intListKey' as strings + intValues.ForEach(x => strList.Add(x.ToString())); + + //retrieve all values again as strings + List strListValues = strList.ToList(); + + //convert back to list of ints + List toIntValues = strListValues.ConvertAll(x => int.Parse(x)); + + Assert.That(toIntValues, Is.EqualTo(intValues)); + + //delete all items in the list + strList.Clear(); + } + + //STORING INTS INTO A LIST USING THE GENERIC CLIENT + using (var redisClient = new 
RedisClient(TestConfig.SingleHost)) + { + //Create a generic client that treats all values as ints: + IRedisTypedClient intRedis = redisClient.As(); + + IRedisList intList = intRedis.Lists[intListKey]; + + //storing all int values in the redis list 'intListKey' as ints + intValues.ForEach(x => intList.Add(x)); + + List toIntListValues = intList.ToList(); + + Assert.That(toIntListValues, Is.EqualTo(intValues)); + } + } + + public class IntAndString + { + public int Id { get; set; } + public string Letter { get; set; } + } + + [Test] + public void Working_with_Generic_types() + { + using (var redisClient = new RedisClient(TestConfig.SingleHost)) + { + //Create a typed Redis client that treats all values as IntAndString: + var typedRedis = redisClient.As(); + + var pocoValue = new IntAndString { Id = 1, Letter = "A" }; + typedRedis.SetValue("pocoKey", pocoValue); + IntAndString toPocoValue = typedRedis.GetValue("pocoKey"); + + Assert.That(toPocoValue.Id, Is.EqualTo(pocoValue.Id)); + Assert.That(toPocoValue.Letter, Is.EqualTo(pocoValue.Letter)); + + var pocoListValues = new List { + new IntAndString {Id = 2, Letter = "B"}, + new IntAndString {Id = 3, Letter = "C"}, + new IntAndString {Id = 4, Letter = "D"}, + new IntAndString {Id = 5, Letter = "E"}, + }; + + IRedisList pocoList = typedRedis.Lists["pocoListKey"]; + + //Adding all IntAndString objects into the redis list 'pocoListKey' + pocoListValues.ForEach(x => pocoList.Add(x)); + + List toPocoListValues = pocoList.ToList(); + + for (var i = 0; i < pocoListValues.Count; i++) + { + pocoValue = pocoListValues[i]; + toPocoValue = toPocoListValues[i]; + Assert.That(toPocoValue.Id, Is.EqualTo(pocoValue.Id)); + Assert.That(toPocoValue.Letter, Is.EqualTo(pocoValue.Letter)); + } + } + } + + } } \ No newline at end of file
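Reviewer note: the new TrackThreadTests earlier in this diff exercise the RedisConfig.AssertAccessOnlyOnSameThread guard, which (as the first test demonstrates) makes a pooled client throw InvalidAccessException when it is reused from a thread other than the one it was resolved on. The sketch below is not part of this diff; it is a minimal illustration, built only from APIs that appear in these tests (RedisManagerPool, GetClient, SetValue/GetValue, RedisConfig, InvalidAccessException), of how an application might enable the guard while hunting for accidental cross-thread sharing of a client. The key names, console messages, and the exact namespace of InvalidAccessException are assumptions.

using System;
using System.Threading.Tasks;
using ServiceStack;          // assumed to contain InvalidAccessException
using ServiceStack.Redis;

class AssertSameThreadSketch
{
    static async Task Main()
    {
        // Enable only while diagnosing; the guard adds a thread check to client calls.
        RedisConfig.AssertAccessOnlyOnSameThread = true;

        var redisManager = new RedisManagerPool();   // parameterless ctor, as used in the tests

        using (var redis = redisManager.GetClient()) // client is tied to the resolving thread
        {
            redis.SetValue("greeting", "hello");     // same thread: allowed

            try
            {
                // Reusing the same client instance from a thread-pool thread should now throw.
                await Task.Run(() => Console.WriteLine(redis.GetValue("greeting")));
            }
            catch (InvalidAccessException ex)
            {
                Console.WriteLine("Cross-thread access detected: " + ex.Message);
            }
        }

        RedisConfig.AssertAccessOnlyOnSameThread = false;
    }
}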