diff --git a/.gitignore b/.gitignore index 692d446f..d28597e3 100644 --- a/.gitignore +++ b/.gitignore @@ -43,7 +43,6 @@ App_Data/ NuGet/ NuGet.Signed/ packages/ -*.pfx build/ tests/ServiceStack.Redis.Tests/azureconfig.txt diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..4d7b5ce3 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,16 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": ".NET Core Launch (console)", + "type": "coreclr", + "request": "launch", + "preLaunchTask": "build", + "program": "${workspaceRoot}/tests/ServiceStack.Redis.Tests/bin/Debug/netcoreapp1.0/ServiceStack.Redis.Tests.dll", + "args": [], + "cwd": "${workspaceRoot}", + "stopAtEntry": false, + "externalConsole": false + } + ] +} \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 00000000..d5b6d366 --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,20 @@ +{ + // See https://go.microsoft.com/fwlink/?LinkId=733558 + // for the documentation about the tasks.json format + "version": "2.0.0", + "command": "dotnet", + "args": [], + "tasks": [ + { + "label": "build", + "type": "shell", + "command": "dotnet", + "args": [ + "build", + "tests/ServiceStack.Redis.Tests" + ], + "problemMatcher": "$msCompile", + "group": "build" + } + ] +} \ No newline at end of file diff --git a/NuGet.Config b/NuGet.Config new file mode 100644 index 00000000..42daf5f4 --- /dev/null +++ b/NuGet.Config @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/NuGet.Core/ServiceStack.Redis.Core/servicestack.redis.core.nuspec b/NuGet.Core/ServiceStack.Redis.Core/servicestack.redis.core.nuspec deleted file mode 100644 index 9e3e128b..00000000 --- a/NuGet.Core/ServiceStack.Redis.Core/servicestack.redis.core.nuspec +++ /dev/null @@ -1,40 +0,0 @@ - - - - ServiceStack.Redis.Core - ServiceStack.Redis.Core - 1.0.0 - Service Stack - Service Stack - - C# Redis Client for the worlds fastest distributed NoSQL datastore. - Byte[], String and POCO Typed clients. - Thread-Safe Basic and Pooled client managers included. - - To revert back to earlier BSD v3, see: servicestack.net/download#v3 - - https://github.com/ServiceStack/ServiceStack.Redis - https://servicestack.net/terms - true - https://servicestack.net/img/logo-32.png - Redis NoSQL Client Distributed Cache PubSub Messaging Transactions - en-US - ServiceStack 2013 and contributors - - - - - - - - - - - - - - - - - - diff --git a/NuGet.Core/nuget.exe b/NuGet.Core/nuget.exe deleted file mode 100755 index 6bb79fe5..00000000 Binary files a/NuGet.Core/nuget.exe and /dev/null differ diff --git a/NuGet.Signed/ServiceStack.Redis.Signed/servicestack.redis.signed.nuspec b/NuGet.Signed/ServiceStack.Redis.Signed/servicestack.redis.signed.nuspec deleted file mode 100644 index 300b0aaf..00000000 --- a/NuGet.Signed/ServiceStack.Redis.Signed/servicestack.redis.signed.nuspec +++ /dev/null @@ -1,29 +0,0 @@ - - - - ServiceStack.Redis.Signed - ServiceStack.Redis.Signed - 4.5.0 - Service Stack - Service Stack - - C# Redis Client for the worlds fastest distributed NoSQL datastore. - Byte[], String and POCO Typed clients. - Thread-Safe Basic and Pooled client managers included. 
- - https://github.com/ServiceStack/ServiceStack.Redis - https://servicestack.net/terms - true - https://servicestack.net/img/logo-32.png - Redis NoSQL Client Distributed Cache PubSub Messaging Transactions - en-US - ServiceStack 2016 and contributors - - - - - - - - - diff --git a/NuGet/NuGetPack.cmd b/NuGet/NuGetPack.cmd deleted file mode 100644 index b010c460..00000000 --- a/NuGet/NuGetPack.cmd +++ /dev/null @@ -1,2 +0,0 @@ -SET NUGET=..\src\.nuget\nuget -%NUGET% pack ServiceStack.Redis\servicestack.redis.nuspec -symbols diff --git a/NuGet/NuGetPush.cmd b/NuGet/NuGetPush.cmd deleted file mode 100644 index 0b9fda08..00000000 --- a/NuGet/NuGetPush.cmd +++ /dev/null @@ -1,3 +0,0 @@ -SET NUGET=..\src\.nuget\nuget -%NUGET% push ServiceStack.Redis.3.9.60.nupkg -%NUGET% push ServiceStack.Redis.3.9.60.symbols.nupkg diff --git a/NuGet/ServiceStack.Redis/servicestack.redis.nuspec b/NuGet/ServiceStack.Redis/servicestack.redis.nuspec deleted file mode 100644 index f61c242f..00000000 --- a/NuGet/ServiceStack.Redis/servicestack.redis.nuspec +++ /dev/null @@ -1,31 +0,0 @@ - - - - ServiceStack.Redis - C# Redis client for the Redis NoSQL DB - 4.5.0 - Service Stack - Service Stack - - C# Redis Client for the worlds fastest distributed NoSQL datastore. - Byte[], String and POCO Typed clients. - Thread-Safe Basic and Pooled client managers included. - - To revert back to earlier BSD v3, see: servicestack.net/download#v3 - - https://github.com/ServiceStack/ServiceStack.Redis - https://servicestack.net/terms - true - https://servicestack.net/img/logo-32.png - Redis NoSQL Client Distributed Cache PubSub Messaging Transactions - en-US - ServiceStack 2016 and contributors - - - - - - - - - diff --git a/README.md b/README.md index 6ff3a71e..cfbbe837 100644 --- a/README.md +++ b/README.md @@ -1,985 +1,5 @@ -Join the [ServiceStack Google+ Community](https://plus.google.com/communities/112445368900682590445) or follow [@ServiceStack](https://twitter.com/servicestack) for updates. +Follow [@ServiceStack](https://twitter.com/servicestack), [view the docs](https://docs.servicestack.net), use [StackOverflow](https://stackoverflow.com/questions/ask?tags=servicestack,servicestack.redis) or [Customer Forums](https://forums.servicestack.net/) for support. -# C#/.NET Client for Redis +# Read ServiceStack.Redis Docs at [docs.servicestack.net/redis](https://docs.servicestack.net/redis/) -## Redis Connection Strings - -Redis Connection strings have been expanded to support the more versatile URI format which is now able to capture most of Redis Client -settings in a single connection string (akin to DB Connection strings). - -Redis Connection Strings supports multiple URI-like formats, from a simple **hostname** or **IP Address and port** pair to a -fully-qualified **URI** with multiple options specified on the QueryString. - -Some examples of supported formats: - - localhost - 127.0.0.1:6379 - redis://localhost:6379 - password@localhost:6379 - clientid:password@localhost:6379 - redis://clientid:password@localhost:6380?ssl=true&db=1 - -> More examples can be seen in -[ConfigTests.cs](https://github.com/ServiceStack/ServiceStack.Redis/blob/master/tests/ServiceStack.Redis.Tests/ConfigTests.cs) - -Any additional configuration can be specified as QueryString parameters. The full list of options that can be specified include: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Option | Type | Description |
|--------|------|-------------|
| Ssl | bool | If this is an SSL connection |
| Db | int | The Redis DB this connection should be set to |
| Client | string | A text alias to specify for this connection for analytic purposes |
| Password | string | UrlEncoded version of the Password for this connection |
| ConnectTimeout | int | Timeout in ms for making a TCP Socket connection |
| SendTimeout | int | Timeout in ms for making a synchronous TCP Socket Send |
| ReceiveTimeout | int | Timeout in ms for waiting for a synchronous TCP Socket Receive |
| IdleTimeOutSecs | int | Timeout in Seconds for an Idle connection to be considered active |
| NamespacePrefix | string | Use a custom prefix for ServiceStack.Redis internal index collections |
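The snippet below is a minimal sketch of how these options compose into a single connection string (the host, password and option values are placeholders, not a real endpoint); the resulting string can be passed directly to any of the Client Managers described below:

```csharp
using ServiceStack.Redis;

// Hypothetical host and credentials; the QueryString option names are those listed above
var connectionString =
    "redis://clientid:p%40ssword@redis-host:6380"
    + "?ssl=true&db=1&client=reporting&connectTimeout=5000"
    + "&sendTimeout=3000&receiveTimeout=3000&idleTimeOutSecs=180"
    + "&namespacePrefix=myapp:";

var redisManager = new RedisManagerPool(connectionString);

using (var redis = redisManager.GetClient())
{
    redis.IncrementValue("connection-test"); // simple round-trip to confirm the configuration
}
```

Note the Password is expected UrlEncoded, so reserved characters such as `@` are escaped (`%40`) when embedded in the connection string.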
- -## Download - - PM> Install-Package ServiceStack.Redis - -_Latest v4+ on NuGet is a [commercial release](https://servicestack.net/redis) with [free quotas](https://servicestack.net/download#free-quotas)._ - -#### [Getting Started with AWS ElastiCache Redis and ServiceStack](https://github.com/ServiceStackApps/AwsGettingStarted) - -ServiceStack.Redis has great support AWS's ElastiCache Redis solution, follow this guide to help getting up and running quickly: - -- [ElastiCache Redis](https://github.com/ServiceStackApps/AwsGettingStarted/blob/master/docs/redis-guide.md) - -### Try out [ServiceStack.Redis Live](http://gistlyn.com/redis-todo) - -A great way to try out ServiceStack.Redis is on [gistlyn.com](http://gistlyn.com) which lets you immediately -run and explore Redis features from the comfort of your browser with zero software install: - -[![](https://raw.githubusercontent.com/ServiceStack/Assets/master/img/redis/gistlyn-redis.png)](http://gistlyn.com/redis-todo) - -## Redis Client Managers - -The recommended way to access `RedisClient` instances is to use one of the available Thread-Safe Client Managers below. Client Managers are connection factories which is ideally registered as a Singleton either in your IOC or static classes. - -### RedisManagerPool - -With the enhanced Redis URI Connection Strings we've been able to simplify and streamline the existing `PooledRedisClientManager` implementation and have extracted it out into a new clients manager called `RedisManagerPool`. - -In addition to removing all above options on the Client Manager itself, readonly connection strings have also been removed so the configuration ends up much simpler and more aligned with the common use-case: - -```csharp -container.Register(c => - new RedisManagerPool(redisConnectionString)); -``` - -**Pooling Behavior** - -Any connections required after the maximum Pool size has been reached will be created and disposed outside of the Pool. By not being restricted to a maximum pool size, the pooling behavior in `RedisManagerPool` can maintain a smaller connection pool size at the cost of potentially having a higher opened/closed connection count. - -### PooledRedisClientManager - -If you prefer to define options on the Client Manager itself or you want to provide separate Read/Write and ReadOnly -(i.e. Master and Slave) redis-servers, use the `PooledRedisClientManager` instead: - -```csharp -container.Register(c => - new PooledRedisClientManager(redisReadWriteHosts, redisReadOnlyHosts) { - ConnectTimeout = 100, - //... - }); -``` - -**Pooling Behavior** - -The `PooledRedisClientManager` imposes a maximum connection limit and when its maximum pool size has been reached will instead block on any new connection requests until the next `RedisClient` is released back into the pool. If no client became available within `PoolTimeout`, a Pool `TimeoutException` will be thrown. - -### BasicRedisClientManager - -If don't want to use connection pooling (i.e. 
your accessing a local redis-server instance) you can use a basic (non-pooled) Clients Manager which creates a new `RedisClient` instance each time: - -```csharp -container.Register(c => - new BasicRedisClientManager(redisConnectionString)); -``` - -### Accessing the Redis Client - -Once registered, accessing the RedisClient is the same in all Client Managers, e.g: - -```csharp -var clientsManager = container.Resolve(); -using (IRedisClient redis = clientsManager.GetClient()) -{ - redis.IncrementValue("counter"); - List days = redis.GetAllItemsFromList("days"); - - //Access Typed API - var redisTodos = redis.As(); - - redisTodos.Store(new Todo { - Id = redisTodos.GetNextSequence(), - Content = "Learn Redis", - }); - - var todo = redisTodos.GetById(1); - - //Access Native Client - var redisNative = (IRedisNativeClient)redis; - - redisNative.Incr("counter"); - List days = redisNative.LRange("days", 0, -1); -} -``` - -A more detailed list of the available RedisClient APIs used in the example can be seen in the C# interfaces below: - - - [IRedisClient](https://github.com/ServiceStack/ServiceStack/blob/master/src/ServiceStack.Interfaces/Redis/IRedisClient.cs) - - [IRedisTypedClient](https://github.com/ServiceStack/ServiceStack/blob/master/src/ServiceStack.Interfaces/Redis/Generic/IRedisTypedClient.cs) - - [IRedisNativeClient](https://github.com/ServiceStack/ServiceStack/blob/master/src/ServiceStack.Interfaces/Redis/IRedisNativeClient.cs) - -## [Redis React Browser](https://servicestack.net/redis-react) - -Redis React is a simple user-friendly UI for browsing data in Redis servers which takes advantages of the complex -type conventions built in the ServiceStack.Redis Client to provide a rich, human-friendly UI for navigating related datasets, enabling a fast and fluid browsing experience for your Redis servers. - -#### [Live Demo](http://redisreact.servicestack.net/#/) - -[![](https://raw.githubusercontent.com/ServiceStack/Assets/master/img/livedemos/redis-react/home.png)](http://redisreact.servicestack.net/#/) - -#### Downloads available from [Redis React Home Page](https://servicestack.net/redis-react) - -## [Redis Sentinel](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Redis-Sentinel) - -To use the new Sentinel support, instead of populating the Redis Client Managers with the -connection string of the master and slave instances you would create a single RedisSentinel -instance configured with the connection string of the running Redis Sentinels: - -```csharp -var sentinelHosts = new[]{ "sentinel1", "sentinel2:6390", "sentinel3" }; -var sentinel = new RedisSentinel(sentinelHosts, masterName: "mymaster"); -``` - -This configues a `RedisSentinel` with 3 sentinel hosts looking at **mymaster** group. 
-As the default port for sentinels when unspecified is **26379** and how RedisSentinel is able to -auto-discover other sentinels, the minimum configuration required is with a single Sentinel host: - -```csharp -var sentinel = new RedisSentinel("sentinel1"); -``` - -### Custom Redis Connection String - -The host the RedisSentinel is configured with only applies to that Sentinel Host, to use the -flexibility of [Redis Connection Strings](#redis-connection-strings) to apply configuration on -individual Redis Clients you need to register a custom `HostFilter`: - -```csharp -sentinel.HostFilter = host => "{0}?db=1&RetryTimeout=5000".Fmt(host); -``` - -An alternative to using connection strings for configuring clients is to modify -[default configuration on RedisConfig](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Redis-Config). - -### Change to use RedisManagerPool - -By default RedisSentinel uses a `PooledRedisClientManager`, this can be changed to use the -newer `RedisManagerPool` with: - -```csharp -sentinel.RedisManagerFactory = (master,slaves) => new RedisManagerPool(master); -``` - -### Start monitoring Sentinels - -Once configured, you can start monitoring the Redis Sentinel servers and access the pre-configured -client manager with: - -```csharp -IRedisClientsManager redisManager = sentinel.Start(); -``` - -Which as before, can be registered in your preferred IOC as a **singleton** instance: - -```csharp -container.Register(c => sentinel.Start()); -``` - -## [Configure Redis Sentinel Servers](https://github.com/ServiceStack/redis-config) - -[![Instant Redis Setup](https://raw.githubusercontent.com/ServiceStack/Assets/master/img/redis/instant-sentinel-setup.png)](https://github.com/ServiceStack/redis-config) - -See the -[redis config project](https://github.com/ServiceStack/redis-config) for a quick way to setup up -the minimal -[highly available Redis Sentinel configuration](https://github.com/ServiceStack/redis-config/blob/master/README.md#3x-sentinels-monitoring-1x-master-and-2x-slaves) -including start/stop scripts for instantly running multiple redis instances on a single (or multiple) -Windows, OSX or Linux servers. - -### [Redis Stats](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Redis-Stats) - -You can use the `RedisStats` class for visibility and introspection into your running instances. -The [Redis Stats wiki](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Redis-Stats) lists the stats available. - -## [Automatic Retries](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Automatic-Retries) - -To improve the resilience of client connections, `RedisClient` will transparently retry failed -Redis operations due to Socket and I/O Exceptions in an exponential backoff starting from -**10ms** up until the `RetryTimeout` of **10000ms**. These defaults can be tweaked with: - -```csharp -RedisConfig.DefaultRetryTimeout = 10000; -RedisConfig.BackOffMultiplier = 10; -``` - -## [ServiceStack.Redis SSL Support](https://github.com/ServiceStack/ServiceStack/wiki/Secure-SSL-Redis-connections-to-Azure-Redis) - -ServiceStack.Redis now supporting **SSL connections** making it suitable for accessing remote Redis server instances over a -**secure SSL connection**. 
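As an illustrative sketch only (the host and access key below are placeholders for your own values), enabling SSL is a matter of pointing a Client Manager at a connection string with `ssl=true`, e.g. for a remote Azure Redis Cache endpoint on its SSL port **6380**:

```csharp
using ServiceStack.Redis;

// Placeholder endpoint and UrlEncoded access key - substitute your own instance details
var connectionString = "{UrlEncodedAccessKey}@myapp.redis.cache.windows.net:6380?ssl=true";

var redisManager = new RedisManagerPool(connectionString);

using (var redis = redisManager.GetClient())
{
    redis.SetValue("ssl-check", "ok"); // round-trips over the secure connection
}
```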
- -![Azure Redis Cache](https://github.com/ServiceStack/Assets/raw/master/img/wikis/redis/azure-redis-instance.png) - -### [Connecting to Azure Redis](https://github.com/ServiceStack/ServiceStack/wiki/Secure-SSL-Redis-connections-to-Azure-Redis) - -As connecting to [Azure Redis Cache](http://azure.microsoft.com/en-us/services/cache/) via SSL was the primary use-case for this feature, -we've added a new -[Getting connected to Azure Redis via SSL](https://github.com/ServiceStack/ServiceStack/wiki/Secure-SSL-Redis-connections-to-Azure-Redis) -to help you get started. - -## [Redis GEO](https://github.com/ServiceStackApps/redis-geo) - -The [release of Redis 3.2.0](http://antirez.com/news/104) brings it exciting new -[GEO capabilities](http://redis.io/commands/geoadd) which will let you store Lat/Long coordinates in Redis -and query locations within a specified radius. To demonstrate this functionality we've created a new -[Redis GEO Live Demo](https://github.com/ServiceStackApps/redis-geo) which lets you click on anywhere in -the U.S. to find the list of nearest cities within a given radius, Live Demo at: http://redisgeo.servicestack.net - - -## Generic APIs for calling Custom Redis commands - -Most of the time when waiting to use a new [Redis Command](http://redis.io/commands) you'll need to wait for an updated version of -**ServiceStack.Redis** to add support for the new commands likewise there are times when the Redis Client doesn't offer every permutation -that redis-server supports. - -With the new `Custom` and `RawCommand` APIs on `IRedisClient` and `IRedisNativeClient` you can now use the RedisClient to send your own -custom commands that can call adhoc Redis commands: - -```csharp -public interface IRedisClient -{ - ... - RedisText Custom(params object[] cmdWithArgs); -} - -public interface IRedisNativeClient -{ - ... - RedisData RawCommand(params object[] cmdWithArgs); - RedisData RawCommand(params byte[][] cmdWithBinaryArgs); -} -``` - -These Custom APIs take a flexible `object[]` arguments which accepts any serializable value e.g. -`byte[]`, `string`, `int` as well as any user-defined Complex Types which are transparently serialized -as JSON and send across the wire as UTF-8 bytes. 
- -```csharp -var ret = Redis.Custom("SET", "foo", 1); // ret.Text = "OK" - -byte[] cmdSet = Commands.Set; -ret = Redis.Custom(cmdSet, "bar", "b"); // ret.Text = "OK" - -ret = Redis.Custom("GET", "foo"); // ret.Text = "1" -``` - -There are also -[convenient extension methods](https://github.com/ServiceStack/ServiceStack.Redis/blob/master/src/ServiceStack.Redis/RedisDataExtensions.cs) -on `RedisData` and `RedisText` that make it easy to access structured data, e.g: - -```csharp -var ret = Redis.Custom(Commands.Keys, "*"); -var keys = ret.GetResults(); // keys = ["foo", "bar"] - -ret = Redis.Custom(Commands.MGet, "foo", "bar"); -var values = ret.GetResults(); // values = ["1", "b"] - -Enum.GetNames(typeof(DayOfWeek)).ToList() - .ForEach(x => Redis.Custom(Commands.RPush, "DaysOfWeek", x)); -ret = Redis.Custom(Commands.LRange, "DaysOfWeek", 1, -2); -var weekDays = ret.GetResults(); - -weekDays.PrintDump(); // ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"] -``` - -and some more examples using Complex Types with the Custom APIs: - -```csharp -var ret = Redis.Custom(Commands.Set, "foo", new Poco { Name = "Bar" }); // ret.Text = "OK" - -ret = Redis.Custom(Commands.Get, "foo"); // ret.Text = {"Name":"Bar"} -Poco dto = ret.GetResult(); - -dto.Name.Print(); // Bar -``` - -This API is used in most of Redis React UI's -[redis.js](https://github.com/ServiceStackApps/RedisReact/blob/master/src/RedisReact/RedisReact/js/redis.js) -JavaScript client library where Redis server commands are made available via the -[single ServiceStack Service](https://github.com/ServiceStackApps/RedisReact/blob/a1b66603d52d2f18b96227fc455ecb5323e424c8/src/RedisReact/RedisReact.ServiceInterface/RedisServices.cs#L73): - -```csharp -public object Any(CallRedis request) -{ - var args = request.Args.ToArray(); - var response = new CallRedisResponse { Result = Redis.Custom(args) }; - return response; -} -``` - -## Managed Pub/Sub Server - -The Pub/Sub engine powering -[Redis ServerEvents](https://github.com/ServiceStack/ServiceStack/wiki/Redis-Server-Events) and -[Redis MQ](https://github.com/ServiceStack/ServiceStack/wiki/Messaging-and-Redis) has been extracted -and encapsulated it into a re-usable class that can be used independently for handling messages -published to specific [Redis Pub/Sub](http://redis.io/commands#pubsub) channels. - -`RedisPubSubServer` processes messages in a managed background thread that **automatically reconnects** -when the redis-server connection fails and works like an independent background Service that can be -stopped and started on command. 
- -The public API is captured in the -[IRedisPubSubServer](https://github.com/ServiceStack/ServiceStack/blob/master/src/ServiceStack.Interfaces/Redis/IRedisPubSubServer.cs) interface: - -```csharp -public interface IRedisPubSubServer : IDisposable -{ - IRedisClientsManager ClientsManager { get; } - // What Channels it's subscribed to - string[] Channels { get; } - - // Run once on initial StartUp - Action OnInit { get; set; } - // Called each time a new Connection is Started - Action OnStart { get; set; } - // Invoked when Connection is broken or Stopped - Action OnStop { get; set; } - // Invoked after Dispose() - Action OnDispose { get; set; } - - // Fired when each message is received - Action OnMessage { get; set; } - // Fired after successfully subscribing to the specified channels - Action OnUnSubscribe { get; set; } - // Called when an exception occurs - Action OnError { get; set; } - // Called before attempting to Failover to a new redis master - Action OnFailover { get; set; } - - int? KeepAliveRetryAfterMs { get; set; } - // The Current Time for RedisServer - DateTime CurrentServerTime { get; } - - // Current Status: Starting, Started, Stopping, Stopped, Disposed - string GetStatus(); - // Different life-cycle stats - string GetStatsDescription(); - - // Subscribe to specified Channels and listening for new messages - IRedisPubSubServer Start(); - // Close active Connection and stop running background thread - void Stop(); - // Stop than Start - void Restart(); -} -``` -### Usage - -To use `RedisPubSubServer`, initialize it with the channels you want to subscribe to and assign handlers -for each of the events you want to handle. At a minimum you'll want to handle `OnMessage`: - -```csharp -var clientsManager = new PooledRedisClientManager(); -var redisPubSub = new RedisPubSubServer(clientsManager, "channel-1", "channel-2") { - OnMessage = (channel, msg) => "Received '{0}' from '{1}'".Print(msg, channel) - }.Start(); -``` - -Calling `Start()` after it's initialized will get it to start listening and processing any messages -published to the subscribed channels. - -### Lex Operations - -The new [ZRANGEBYLEX](http://redis.io/commands/zrangebylex) sorted set operations allowing you to query a sorted set lexically have been added. -A good showcase for this is available on [autocomplete.redis.io](http://autocomplete.redis.io/). - -These new operations are available as a 1:1 mapping with redis-server on `IRedisNativeClient`: - -```csharp -public interface IRedisNativeClient -{ - ... - byte[][] ZRangeByLex(string setId, string min, string max, int? skip, int? take); - long ZLexCount(string setId, string min, string max); - long ZRemRangeByLex(string setId, string min, string max); -} -``` - -And the more user-friendly APIs under `IRedisClient`: - -```csharp -public interface IRedisClient -{ - ... - List SearchSortedSet(string setId, string start=null, string end=null); - long SearchSortedSetCount(string setId, string start=null, string end=null); - long RemoveRangeFromSortedSetBySearch(string setId, string start=null, string end=null); -} -``` - -Just like NuGet version matchers, Redis uses `[` char to express inclusiveness and `(` char for exclusiveness. 
-Since the `IRedisClient` APIs defaults to inclusive searches, these two APIs are the same: - -```csharp -Redis.SearchSortedSetCount("zset", "a", "c") -Redis.SearchSortedSetCount("zset", "[a", "[c") -``` - -Alternatively you can specify one or both bounds to be exclusive by using the `(` prefix, e.g: - -```csharp -Redis.SearchSortedSetCount("zset", "a", "(c") -Redis.SearchSortedSetCount("zset", "(a", "(c") -``` - -More API examples are available in [LexTests.cs](https://github.com/ServiceStack/ServiceStack.Redis/blob/master/tests/ServiceStack.Redis.Tests/LexTests.cs). - -### HyperLog API - -The development branch of Redis server (available when v3.0 is released) includes an ingenious algorithm to approximate the unique elements in a set with maximum space and time efficiency. For details about how it works see Redis's creator Salvatore's blog who [explains it in great detail](http://antirez.com/news/75). Essentially it lets you maintain an efficient way to count and merge unique elements in a set without having to store its elements. -A Simple example of it in action: - -```csharp -redis.AddToHyperLog("set1", "a", "b", "c"); -redis.AddToHyperLog("set1", "c", "d"); -var count = redis.CountHyperLog("set1"); //4 - -redis.AddToHyperLog("set2", "c", "d", "e", "f"); - -redis.MergeHyperLogs("mergedset", "set1", "set2"); - -var mergeCount = redis.CountHyperLog("mergedset"); //6 -``` - -### Scan APIs - -Redis v2.8 introduced a beautiful new [SCAN](http://redis.io/commands/scan) operation that provides an optimal strategy for traversing a redis instance entire keyset in managable-size chunks utilizing only a client-side cursor and without introducing any server state. It's a higher performance alternative and should be used instead of [KEYS](http://redis.io/commands/keys) in application code. SCAN and its related operations for traversing members of Sets, Sorted Sets and Hashes are now available in the Redis Client in the following APIs: - -```csharp -public interface IRedisClient -{ - ... - IEnumerable ScanAllKeys(string pattern = null, int pageSize = 1000); - IEnumerable ScanAllSetItems(string setId, string pattern = null, int pageSize = 1000); - IEnumerable> ScanAllSortedSetItems(string setId, string pattern = null, int pageSize = 1000); - IEnumerable> ScanAllHashEntries(string hashId, string pattern = null, int pageSize = 1000); -} - -//Low-level API -public interface IRedisNativeClient -{ - ... - ScanResult Scan(ulong cursor, int count = 10, string match = null); - ScanResult SScan(string setId, ulong cursor, int count = 10, string match = null); - ScanResult ZScan(string setId, ulong cursor, int count = 10, string match = null); - ScanResult HScan(string hashId, ulong cursor, int count = 10, string match = null); -} -``` - -The `IRedisClient` provides a higher-level API that abstracts away the client cursor to expose a lazy Enumerable sequence to provide an optimal way to stream scanned results that integrates nicely with LINQ, e.g: - -```csharp -var scanUsers = Redis.ScanAllKeys("urn:User:*"); -var sampleUsers = scanUsers.Take(10000).ToList(); //Stop after retrieving 10000 user keys -``` - -### Efficient SCAN in LUA - -The C# API below returns the first 10 results matching the `key:*` pattern: - -```csharp -var keys = Redis.ScanAllKeys(pattern: "key:*", pageSize: 10) - .Take(10).ToList(); -``` - -However the C# Streaming API above requires an unknown number of Redis Operations (bounded to the number of keys in Redis) -to complete the request. 
The number of SCAN calls can be reduced by choosing a higher `pageSize` to tell Redis to scan more keys -each time the SCAN operation is called. - -As the number of API calls has the potential to result in a large number of Redis Operations, it can end up yielding an unacceptable -delay due to the latency of multiple dependent remote network calls. An easy solution is to instead have the multiple SCAN calls -performed in-process on the Redis Server, eliminating the network latency of multiple SCAN calls, e.g: - -```csharp -const string FastScanScript = @" -local limit = tonumber(ARGV[2]) -local pattern = ARGV[1] -local cursor = 0 -local len = 0 -local results = {} -repeat - local r = redis.call('scan', cursor, 'MATCH', pattern, 'COUNT', limit) - cursor = tonumber(r[1]) - for k,v in ipairs(r[2]) do - table.insert(results, v) - len = len + 1 - if len == limit then break end - end -until cursor == 0 or len == limit -return results"; - -RedisText r = redis.ExecLua(FastScanScript, "key:*", "10"); -r.Children.Count.Print() //= 10 -``` - -The `ExecLua` API returns this complex LUA table response in the `Children` collection of the `RedisText` Response. - -#### Alternative Complex API Response - -Another way to return complex data structures in a LUA operation is to serialize the result as JSON - - return cjson.encode(results) - -Which you can access as raw JSON by parsing the response as a String with: - -```csharp -string json = redis.ExecLuaAsString(FastScanScript, "key:*", "10"); -``` - -> This is also the approach used in Redis React's -[RedisServices](https://github.com/ServiceStackApps/RedisReact/blob/a1b66603d52d2f18b96227fc455ecb5323e424c8/src/RedisReact/RedisReact.ServiceInterface/RedisServices.cs#L60). - -### ExecCachedLua - -ExecCachedLua is a convenient high-level API that eliminates the bookkeeping required for executing high-performance server LUA -Scripts which suffers from many of the problems that RDBMS stored procedures have which depends on pre-existing state in the RDBMS -that needs to be updated with the latest version of the Stored Procedure. - -With Redis LUA you either have the option to send, parse, load then execute the entire LUA script each time it's called or -alternatively you could pre-load the LUA Script into Redis once on StartUp and then execute it using the Script's SHA1 hash. -The issue with this is that if the Redis server is accidentally flushed you're left with a broken application relying on a -pre-existing script that's no longer there. The new `ExecCachedLua` API provides the best of both worlds where it will always -execute the compiled SHA1 script, saving bandwidth and CPU but will also re-create the LUA Script if it no longer exists. 
- -You can instead execute the compiled LUA script above by its SHA1 identifier, which continues to work regardless if it never existed -or was removed at runtime, e.g: - -```csharp -// #1: Loads LUA script and caches SHA1 hash in Redis Client -r = redis.ExecCachedLua(FastScanScript, sha1 => - redis.ExecLuaSha(sha1, "key:*", "10")); - -// #2: Executes using cached SHA1 hash -r = redis.ExecCachedLua(FastScanScript, sha1 => - redis.ExecLuaSha(sha1, "key:*", "10")); - -// Deletes all existing compiled LUA scripts -redis.ScriptFlush(); - -// #3: Executes using cached SHA1 hash, gets NOSCRIPT Error, -// re-creates then re-executes the LUA script using its SHA1 hash -r = redis.ExecCachedLua(FastScanScript, sha1 => - redis.ExecLuaSha(sha1, "key:*", "10")); -``` - -### IRedisClient LUA APIs - -The `IRedisClient` APIs for [redis server-side LUA support](http://redis.io/commands/eval) have been re-factored into the more user-friendly APIs below: - -```csharp -public interface IRedisClient -{ - //Eval/Lua operations - T ExecCachedLua(string scriptBody, Func scriptSha1); - - RedisText ExecLua(string body, params string[] args); - RedisText ExecLua(string luaBody, string[] keys, string[] args); - RedisText ExecLuaSha(string sha1, params string[] args); - RedisText ExecLuaSha(string sha1, string[] keys, string[] args); - - string ExecLuaAsString(string luaBody, params string[] args); - string ExecLuaAsString(string luaBody, string[] keys, string[] args); - string ExecLuaShaAsString(string sha1, params string[] args); - string ExecLuaShaAsString(string sha1, string[] keys, string[] args); - - int ExecLuaAsInt(string luaBody, params string[] args); - int ExecLuaAsInt(string luaBody, string[] keys, string[] args); - int ExecLuaShaAsInt(string sha1, params string[] args); - int ExecLuaShaAsInt(string sha1, string[] keys, string[] args); - - List ExecLuaAsList(string luaBody, params string[] args); - List ExecLuaAsList(string luaBody, string[] keys, string[] args); - List ExecLuaShaAsList(string sha1, params string[] args); - List ExecLuaShaAsList(string sha1, string[] keys, string[] args); - - string CalculateSha1(string luaBody); - - bool HasLuaScript(string sha1Ref); - Dictionary WhichLuaScriptsExists(params string[] sha1Refs); - void RemoveAllLuaScripts(); - void KillRunningLuaScript(); - string LoadLuaScript(string body); -} -``` - -### Usage Examples - -Here's how you can implement a ZPOP in Lua to remove the items with the lowest rank from a sorted set: - -```csharp -var luaBody = @" - local val = redis.call('zrange', KEYS[1], 0, ARGV[1]-1) - if val then redis.call('zremrangebyrank', KEYS[1], 0, ARGV[1]-1) end - return val"; - -var i = 0; -var alphabet = 26.Times(c => ((char)('A' + c)).ToString()); -alphabet.ForEach(x => Redis.AddItemToSortedSet("zalphabet", x, i++)); - -//Remove the letters with the lowest rank from the sorted set 'zalphabet' -var letters = Redis.ExecLuaAsList(luaBody, keys: new[] { "zalphabet" }, args: new[] { "3" }); -letters.PrintDump(); //[A, B, C] -``` - -And how to implement ZREVPOP to remove items with the highest rank from a sorted set: - -```csharp -var luaBody = @" - local val = redis.call('zrange', KEYS[1], -ARGV[1], -1) - if val then redis.call('zremrangebyrank', KEYS[1], -ARGV[1], -1) end - return val"; - -var i = 0; -var alphabet = 26.Times(c => ((char)('A' + c)).ToString()); -alphabet.ForEach(x => Redis.AddItemToSortedSet("zalphabet", x, i++)); - -//Remove the letters with the highest rank from the sorted set 'zalphabet' -List letters = Redis.ExecLuaAsList(luaBody, - 
keys: new[] { "zalphabet" }, args: new[] { "3" }); - -letters.PrintDump(); //[X, Y, Z] -``` - -### Other examples - -Returning an int: - -```csharp -int intVal = Redis.ExecLuaAsInt("return 123"); //123 -int intVal = Redis.ExecLuaAsInt("return ARGV[1] + ARGV[2]", "10", "20"); //30 -``` - -Returning an string: - -```csharp -//Hello, Redis Lua! -var strVal = Redis.ExecLuaAsString(@"return 'Hello, ' .. ARGV[1] .. '!'", "Redis Lua"); -``` - -Returning a List of strings: - -```csharp -Enum.GetNames(typeof(DayOfWeek)).ToList() - .ForEach(x => Redis.AddItemToList("DaysOfWeek", x)); - -var daysOfWeek = Redis.ExecLuaAsList("return redis.call('LRANGE', 'DaysOfWeek', 0, -1)"); -daysOfWeek.PrintDump(); //[Sunday, Monday, Tuesday, ...] -``` - -More examples can be found in the [Redis Eval Lua tests](https://github.com/ServiceStack/ServiceStack.Redis/blob/master/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs -) - -## Overview - -*The Redis client is an independent project and can be used with or without the ServiceStack webservices framework.* - -[Redis](http://code.google.com/p/redis/) is one of the fastest and most feature-rich key-value stores to come from the [NoSQL](http://en.wikipedia.org/wiki/NoSQL) movement. -It is similar to memcached but the dataset is not volatile, and values can either be strings lists, sets, sorted sets or hashes. - -[ServiceStack's C# Redis Client](https://github.com/ServiceStack/ServiceStack.Redis) is an Open Source C# Redis client based on [Miguel de Icaza](http://twitter.com/migueldeicaza) previous efforts with [redis-sharp](http://github.com/migueldeicaza/redis-sharp). - -There are a number of different APIs available which are all a friendly drop-in with your local IOC: -The `ServiceStack.Redis.RedisClient` class below implements the following interfaces: - - * [ICacheClient](https://github.com/ServiceStack/ServiceStack/wiki/Caching) - If you are using Redis solely as a cache, you should bind to the [ServiceStack's common interface](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Caching) as there already are In-Memory an Memcached implementations available in ServiceStack, allowing you to easily switch providers in-future. - * [IRedisNativeClient](https://github.com/ServiceStack/ServiceStack.Redis/wiki/IRedisNativeClient) - For those wanting a low-level raw byte access (where you can control your own serialization/deserialization) that map 1:1 with Redis operations of the same name. - -For most cases if you require access to Redis specific functionality you would want to bind to the interface below: - - * [IRedisClient](https://github.com/ServiceStack/ServiceStack.Redis/wiki/IRedisClient) - Provides a friendlier, more descriptive API that lets you store values as strings (UTF8 encoding). - * [IRedisTypedClient](https://github.com/ServiceStack/ServiceStack.Redis/wiki/IRedisTypedClient) - created with `IRedisClient.As()` - it returns a 'strongly-typed client' that provides a typed-interface for all redis value operations that works against any C#/.NET POCO type. - -The class hierarchy for the C# Redis clients effectively look like: - - RedisTypedClient (POCO) > RedisClient (string) > RedisNativeClient (raw byte[]) - -Each client provides a different layer of abstraction: - - * The RedisNativeClient exposes raw **byte[]** apis and does no marshalling and passes all values directly to redis. 
- * The RedisClient assumes **string** values and simply converts strings to UTF8 bytes before sending to Redis - * The RedisTypedClient provides a generic interface allowing you to add POCO values. The POCO types are serialized using [.NETs fastest JSON Serializer](http://www.servicestack.net/mythz_blog/?p=344) which is then converted to UTF8 bytes and sent to Redis. - -### Redis Client API Overview -[![Redis Client API](http://mono.servicestack.net/img/Redis-annotated-preview.png)](http://mono.servicestack.net/img/Redis-annotated.png) - -### Thread-safe client managers -For multi-threaded applications you can choose from our different client connection managers: - - * BasicRedisClientManager - a load-balance (master-write and read-slaves) client manager that returns a new [IRedisClient](https://github.com/ServiceStack/ServiceStack.Redis/wiki/IRedisClient) connection with the defaults specified (faster when accessing a redis-server instance on the same host). - * PooledRedisClientManager - a load-balanced (master-write and read-slaves) client manager that utilizes a pool of redis client connections (faster when accessing a redis-server instance over the network). - -### [Docs and Downloads for older v3 BSD releases](https://github.com/ServiceStackV3/ServiceStackV3) - -## Copying - -Since September 2013, ServiceStack source code is available under GNU Affero General Public License/FOSS License Exception, see license.txt in the source. -Alternative commercial licensing is also available, see https://servicestack.net/pricing for details. - -## Contributing - -Commits should be made to the **v3-fixes** branch so they can be merged into both **v3** and **master** (v4) release branches. -Contributors need to approve the [Contributor License Agreement](https://docs.google.com/forms/d/16Op0fmKaqYtxGL4sg7w_g-cXXyCoWjzppgkuqzOeKyk/viewform) before any code will be reviewed, see the [Contributing wiki](https://github.com/ServiceStack/ServiceStack/wiki/Contributing) for more details. - -### Redis Server builds for Windows - - * [MS Open Tech - Redis on Windows](https://github.com/MSOpenTech/Redis) - * [Downloads for Cygwin 32bit Redis Server Windows builds](http://code.google.com/p/servicestack/wiki/RedisWindowsDownload). - * [Project that lets you run Redis as a Windows Service](https://github.com/rgl/redis) - * [Another Redis as a Windows Service project, which allows you to run separate service for each Redis instance](https://github.com/kcherenkov/redis-windows-service) - * [Downloads for MinGW 32bit and 64bit Redis Server Windows builds](http://github.com/dmajkic/redis/downloads) - -### Redis Virtual Machines - - * [Run Redis in a Vagrant virtual machine](https://github.com/JasonPunyon/redishobo) - -# Getting Started with the C# Redis client - -###[C# Redis Client wiki](https://github.com/ServiceStack/ServiceStack.Redis/wiki) -Contains all the examples, tutorials and resources you need to get you up to speed with common operations and the latest features. - -[Useful Links on Redis server](https://github.com/ServiceStack/ServiceStack.Redis/wiki/Useful-Redis-Links) - -### Specific Examples - * [Using Transactions in Redis (i.e. 
MULTI/EXEC/DISCARD)](https://github.com/ServiceStack/ServiceStack.Redis/wiki/RedisTransactions) - * [Using Redis's built-in Publish/Subscribe pattern for high performance network notifications](https://github.com/ServiceStack/ServiceStack.Redis/wiki/RedisPubSub) - * [Using Redis to create high performance *distributed locks* spannable across multiple app servers](https://github.com/ServiceStack/ServiceStack.Redis/wiki/RedisLocks) - -# Simple example using Redis Lists - -Below is a simple example to give you a flavour of how easy it is to use some of Redis's advanced data structures - in this case Redis Lists: -_Full source code of this example is [viewable online](https://github.com/ServiceStack/ServiceStack.Redis/blob/master/tests/ServiceStack.Redis.Tests/ShippersExample.cs)_ - - using (var redisClient = new RedisClient()) - { - //Create a 'strongly-typed' API that makes all Redis Value operations to apply against Shippers - IRedisTypedClient redis = redisClient.As(); - - //Redis lists implement IList while Redis sets implement ICollection - var currentShippers = redis.Lists["urn:shippers:current"]; - var prospectiveShippers = redis.Lists["urn:shippers:prospective"]; - - currentShippers.Add( - new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "Trains R Us", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.Trains, - UniqueRef = Guid.NewGuid() - }); - - currentShippers.Add( - new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "Planes R Us", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.Planes, - UniqueRef = Guid.NewGuid() - }); - - var lameShipper = new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "We do everything!", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.All, - UniqueRef = Guid.NewGuid() - }; - - currentShippers.Add(lameShipper); - - Dump("ADDED 3 SHIPPERS:", currentShippers); - - currentShippers.Remove(lameShipper); - - Dump("REMOVED 1:", currentShippers); - - prospectiveShippers.Add( - new Shipper { - Id = redis.GetNextSequence(), - CompanyName = "Trucks R Us", - DateCreated = DateTime.UtcNow, - ShipperType = ShipperType.Automobiles, - UniqueRef = Guid.NewGuid() - }); - - Dump("ADDED A PROSPECTIVE SHIPPER:", prospectiveShippers); - - redis.PopAndPushBetweenLists(prospectiveShippers, currentShippers); - - Dump("CURRENT SHIPPERS AFTER POP n' PUSH:", currentShippers); - Dump("PROSPECTIVE SHIPPERS AFTER POP n' PUSH:", prospectiveShippers); - - var poppedShipper = redis.PopFromList(currentShippers); - Dump("POPPED a SHIPPER:", poppedShipper); - Dump("CURRENT SHIPPERS AFTER POP:", currentShippers); - - //reset sequence and delete all lists - redis.SetSequence(0); - redis.Remove(currentShippers, prospectiveShippers); - Dump("DELETING CURRENT AND PROSPECTIVE SHIPPERS:", currentShippers); - } - - /* - == EXAMPLE OUTPUT == - - ADDED 3 SHIPPERS: - Id:1,CompanyName:Trains R Us,ShipperType:Trains,DateCreated:2010-01-31T11:53:37.7169323Z,UniqueRef:d17c5db0415b44b2ac5da7b6ebd780f5 - Id:2,CompanyName:Planes R Us,ShipperType:Planes,DateCreated:2010-01-31T11:53:37.799937Z,UniqueRef:e02a73191f4b4e7a9c44eef5b5965d06 - Id:3,CompanyName:We do everything!,ShipperType:All,DateCreated:2010-01-31T11:53:37.8009371Z,UniqueRef:d0c249bbbaf84da39fc4afde1b34e332 - - REMOVED 1: - Id:1,CompanyName:Trains R Us,ShipperType:Trains,DateCreated:2010-01-31T11:53:37.7169323Z,UniqueRef:d17c5db0415b44b2ac5da7b6ebd780f5 - Id:2,CompanyName:Planes R 
Us,ShipperType:Planes,DateCreated:2010-01-31T11:53:37.799937Z,UniqueRef:e02a73191f4b4e7a9c44eef5b5965d06 - - ADDED A PROSPECTIVE SHIPPER: - Id:4,CompanyName:Trucks R Us,ShipperType:Automobiles,DateCreated:2010-01-31T11:53:37.8539401Z,UniqueRef:67d7d4947ebc4b0ba5c4d42f5d903bec - - CURRENT SHIPPERS AFTER POP n' PUSH: - Id:4,CompanyName:Trucks R Us,ShipperType:Automobiles,DateCreated:2010-01-31T11:53:37.8539401Z,UniqueRef:67d7d4947ebc4b0ba5c4d42f5d903bec - Id:1,CompanyName:Trains R Us,ShipperType:Trains,DateCreated:2010-01-31T11:53:37.7169323Z,UniqueRef:d17c5db0415b44b2ac5da7b6ebd780f5 - Id:2,CompanyName:Planes R Us,ShipperType:Planes,DateCreated:2010-01-31T11:53:37.799937Z,UniqueRef:e02a73191f4b4e7a9c44eef5b5965d06 - - PROSPECTIVE SHIPPERS AFTER POP n' PUSH: - - POPPED a SHIPPER: - Id:2,CompanyName:Planes R Us,ShipperType:Planes,DateCreated:2010-01-31T11:53:37.799937Z,UniqueRef:e02a73191f4b4e7a9c44eef5b5965d06 - - CURRENT SHIPPERS AFTER POP: - Id:4,CompanyName:Trucks R Us,ShipperType:Automobiles,DateCreated:2010-01-31T11:53:37.8539401Z,UniqueRef:67d7d4947ebc4b0ba5c4d42f5d903bec - Id:1,CompanyName:Trains R Us,ShipperType:Trains,DateCreated:2010-01-31T11:53:37.7169323Z,UniqueRef:d17c5db0415b44b2ac5da7b6ebd780f5 - - DELETING CURRENT AND PROSPECTIVE SHIPPERS: - */ - -More examples are available in the [RedisExamples Redis examples page] and in the comprehensive -[test suite](https://github.com/ServiceStack/ServiceStack.Redis/tree/master/tests/ServiceStack.Redis.Tests) - - -## Speed -One of the best things about Redis is the speed - it is quick. - -[This example](https://github.com/ServiceStack/ServiceStack.Redis/blob/master/tests/ServiceStack.Redis.Tests/RedisClientTests.cs) -below stores and gets the entire [Northwind database](http://code.google.com/p/servicestack/source/browse/trunk/Common/Northwind.Benchmarks/Northwind.Common/DataModel/NorthwindData.cs) (3202 records) in less *1.2 secs* - we've never had it so quick! 
- -_(Running inside a VS.NET/R# unit test on a 3 year old iMac)_ - - using (var client = new RedisClient()) - { - var before = DateTime.Now; - client.StoreAll(NorthwindData.Categories); - client.StoreAll(NorthwindData.Customers); - client.StoreAll(NorthwindData.Employees); - client.StoreAll(NorthwindData.Shippers); - client.StoreAll(NorthwindData.Orders); - client.StoreAll(NorthwindData.Products); - client.StoreAll(NorthwindData.OrderDetails); - client.StoreAll(NorthwindData.CustomerCustomerDemos); - client.StoreAll(NorthwindData.Regions); - client.StoreAll(NorthwindData.Territories); - client.StoreAll(NorthwindData.EmployeeTerritories); - - Console.WriteLine("Took {0}ms to store the entire Northwind database ({1} records)", - (DateTime.Now - before).TotalMilliseconds, totalRecords); - - - before = DateTime.Now; - var categories = client.GetAll(); - var customers = client.GetAll(); - var employees = client.GetAll(); - var shippers = client.GetAll(); - var orders = client.GetAll(); - var products = client.GetAll(); - var orderDetails = client.GetAll(); - var customerCustomerDemos = client.GetAll(); - var regions = client.GetAll(); - var territories = client.GetAll(); - var employeeTerritories = client.GetAll(); - - Console.WriteLine("Took {0}ms to get the entire Northwind database ({1} records)", - (DateTime.Now - before).TotalMilliseconds, totalRecords); - } - /* - == EXAMPLE OUTPUT == - - Took 1020.0583ms to store the entire Northwind database (3202 records) - Took 132.0076ms to get the entire Northwind database (3202 records) - */ - - -Note: The total time taken includes an extra Redis operation for each record to store the id in a Redis set for each -type as well as serializing and de-serializing each record using Service Stack's TypeSerializer. 
- - -# Community Resources - - - [Synchronizing Redis local caches for distributed multi-subscriber scenarios](http://toreaurstad.blogspot.no/2015/09/synchronizing-redis-local-caches-for.html) by [@Tore_Aurstad](https://twitter.com/Tore_Aurstad) - - [Distributed Caching using Redis Server with .NET/C# Client](http://www.codeproject.com/Articles/636730/Distributed-Caching-using-Redis) by [Sem.Shekhovtsov](http://www.codeproject.com/script/Membership/View.aspx?mid=6495187) - - [Fan Messaging with ServiceStack.Redis](http://cornishdev.wordpress.com/2013/04/04/fan-messaging-with-servicestack-redis/) by [miket](http://stackoverflow.com/users/1804544/miket) - - [Redis and VB.Net](http://blogs.lessthandot.com/index.php/DataMgmt/DBProgramming/redis-and-vb-net) by [@chrissie1](https://twitter.com/chrissie1) - - [Using ServiceStack.Redis Part 2: Sets and Hashes](http://michaelsarchet.com/using-servicestack-redis-part-2-sets-and-hashes/) by [@msarchet](http://twitter.com/msarchet) - - [Using the ServiceStack.Redis Client](http://michaelsarchet.com/using-the-servicestack-redis-client/) by [@msarchet](http://twitter.com/msarchet) - - [Implementing ServiceStack.Redis.RedisClient (.NET Client for Redis)](http://www.narizwallace.com/2012/10/implementing-servicestack-redis-redisclient-net-client-for-redis/) by [@NarizWallace](https://twitter.com/NarizWallace) - - [Getting started with Redis in ASP.NET under Windows](http://maxivak.com/getting-started-with-redis-and-asp-net-mvc-under-windows/) by [@maxivak](https://twitter.com/maxivak) - +### This repository [has moved](https://docs.servicestack.net/mono-repo) to [github.com/ServiceStack/ServiceStack/ServiceStack.Redis](https://github.com/ServiceStack/ServiceStack/tree/main/ServiceStack.Redis) diff --git a/build/appsettings.license.txt b/build/appsettings.license.txt deleted file mode 100644 index 1e498b17..00000000 --- a/build/appsettings.license.txt +++ /dev/null @@ -1 +0,0 @@ -1001-e1JlZjoxMDAxLE5hbWU6VGVzdCBCdXNpbmVzcyxUeXBlOkJ1c2luZXNzLEhhc2g6UHVNTVRPclhvT2ZIbjQ5MG5LZE1mUTd5RUMzQnBucTFEbTE3TDczVEF4QUNMT1FhNXJMOWkzVjFGL2ZkVTE3Q2pDNENqTkQyUktRWmhvUVBhYTBiekJGUUZ3ZE5aZHFDYm9hL3lydGlwUHI5K1JsaTBYbzNsUC85cjVJNHE5QVhldDN6QkE4aTlvdldrdTgyTk1relY2eis2dFFqTThYN2lmc0JveHgycFdjPSxFeHBpcnk6MjAxMy0wMS0wMX0= \ No newline at end of file diff --git a/build/build-core.proj b/build/build-core.proj new file mode 100644 index 00000000..3ac34b8b --- /dev/null +++ b/build/build-core.proj @@ -0,0 +1,80 @@ + + + + + + 6 + 0 + $(BUILD_NUMBER) + + + + $(MSBuildProjectDirectory)/.. 
+ $(BuildSolutionDir)/src + $(BuildSolutionDir)/tests + Release + $(BuildSolutionDir)/NuGet/ + $(MajorVersion).$(MinorVersion).$(PatchVersion) + + + + + BeforeBuildSolutions; + BuildSolutions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + <Version>[^<]* + <Version>$(PackageVersion) + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/build/build-core.sh b/build/build-core.sh deleted file mode 100755 index 66cec0cc..00000000 --- a/build/build-core.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh - -if [ -z "$MajorVersion" ]; then - MajorVersion=1 -fi -if [ -z "$MinorVersion" ]; then - MinorVersion=0 -fi -if [ -z "$PatchVersion" ]; then - PatchVersion=$BUILD_NUMBER -fi -if [ -z "$RELEASE" ]; then - UnstableTag="-unstable" -fi - -Version=$MajorVersion.$MinorVersion.$PatchVersion.0 -EnvVersion=$MajorVersion.$MinorVersion$PatchVersion -PackageVersion=$MajorVersion.$MinorVersion.$PatchVersion$UnstableTag - -echo replace AssemblyVersion -find ./src -type f -name "AssemblyInfo.cs" -exec sed -i "s/AssemblyVersion(\"[^\"]\+\")/AssemblyVersion(\"1.0.0.0\")/g" {} + -echo replace AssemblyFileVersion -find ./src -type f -name "AssemblyInfo.cs" -exec sed -i "s/AssemblyFileVersion(\"[^\"]\+\")/AssemblyFileVersion(\"${Version}\")/g" {} + - -echo replace project.json -sed -i "s/\"version\": \"[^\"]\+\"/\"version\": \"${Version}\"/g" ./src/ServiceStack.Redis.NetCore/ServiceStack.Text/project.json -sed -i "s/\"version\": \"[^\"]\+\"/\"version\": \"${Version}\"/g" ./src/ServiceStack.Redis.NetCore/ServiceStack.Redis/project.json -sed -i "s/\"version\": \"[^\"]\+\"/\"version\": \"${Version}\"/g" ./src/ServiceStack.Redis.NetCore/ServiceStack.Interfaces/project.json -sed -i "s/\"version\": \"[^\"]\+\"/\"version\": \"${Version}\"/g" ./src/ServiceStack.Redis.NetCore/ServiceStack.Common/project.json - -echo replace package -find ./NuGet.Core -type f -name "*.nuspec" -exec sed -i "s/[^<]\+/${PackageVersion}/g" {} + -find ./NuGet.Core -type f -name "*.nuspec" -exec sed -i "s/\"ServiceStack.Text.Core\" version=\"[^\"]\+\"/\"ServiceStack.Text.Core\" version=\"\[${PackageVersion}, \)\"/g" {} + -find ./NuGet.Core -type f -name "*.nuspec" -exec sed -i "s/\"ServiceStack.Interfaces.Core\" version=\"[^\"]\+\"/\"ServiceStack.Interfaces.Core\" version=\"\[${PackageVersion}, \)\"/g" {} + -find ./NuGet.Core -type f -name "*.nuspec" -exec sed -i "s/\"ServiceStack.Common.Core\" version=\"[^\"]\+\"/\"ServiceStack.Common.Core\" version=\"\[${PackageVersion}, \)\"/g" {} + - - -#restore packages -#(cd ./src/ServiceStack.Redis.NetCore && dotnet restore) -#(cd ./tests/ServiceStack.Redis.Tests.NetCore/ServiceStack.Redis.Tests && dotnet restore) - -#execute tests -#(cd ./tests/ServiceStack.Redis.Tests.NetCore/ServiceStack.Redis.Tests && dotnet run -c Release) - -#nuget pack -#(cd ./NuGet.Core && ./nuget.exe pack ServiceStack.Redis.Core/servicestack.redis.core.nuspec -symbols) diff --git a/build/build-sn.proj b/build/build-sn.proj deleted file mode 100644 index 4fcb05a4..00000000 --- a/build/build-sn.proj +++ /dev/null @@ -1,105 +0,0 @@ - - - - - - 4 - 5 - $(BUILD_NUMBER) - - - - $(MSBuildProjectDirectory)/.. 
- $(BuildSolutionDir)/src - Signed - $(BuildSolutionDir)/src/.nuget/nuget.exe - $(BuildSolutionDir)/NuGet.Signed/ - $(MajorVersion).$(MinorVersion).$(PatchVersion).0 - -unstable - $(MajorVersion).$(MinorVersion).$(PatchVersion)$(UnstableTag) - - - - - - - - - - - BeforeBuildSolutions; - BuildSolutions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - AssemblyFileVersion\(\"\d+\.\d+\.\d+\.\d+\"\) - AssemblyFileVersion("$(Version)") - - - - - AssemblyVersion\(\"\d+\.\d+\.\d+\.\d+\"\) - AssemblyVersion("4.0.0.0") - - - - - version="4\.5[^"]*" - version="$(PackageVersion)" - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/build/build.bat b/build/build.bat index e6a22e53..c51cb92d 100644 --- a/build/build.bat +++ b/build/build.bat @@ -1,4 +1,3 @@ -SET MSBUILD=C:\Windows\Microsoft.NET\Framework\v4.0.30319\msbuild.exe +SET MSBUILD="C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe" -REM %MSBUILD% build.proj /target:NuGetPack /property:Configuration=Release;RELEASE=true;PatchVersion=1 -%MSBUILD% build-sn.proj /target:NuGetPack /property:Configuration=Signed;RELEASE=true;PatchVersion=0 +%MSBUILD% build.proj /property:Configuration=Release;MinorVersion=4;PatchVersion=1 diff --git a/build/build.proj b/build/build.proj index e526a956..7899a808 100644 --- a/build/build.proj +++ b/build/build.proj @@ -1,31 +1,23 @@ + xmlns='http://schemas.microsoft.com/developer/msbuild/2003' ToolsVersion="4.0"> - 4 - 5 + 6 + 0 $(BUILD_NUMBER) $(MSBuildProjectDirectory)/.. $(BuildSolutionDir)/src + $(BuildSolutionDir)/tests Release - $(BuildSolutionDir)/src/.nuget/nuget.exe $(BuildSolutionDir)/NuGet/ - $(MajorVersion).$(MinorVersion).$(PatchVersion).0 - -unstable - $(MajorVersion).$(MinorVersion).$(PatchVersion)$(UnstableTag) + $(MajorVersion).$(MinorVersion).$(PatchVersion) - - - - - - BeforeBuildSolutions; @@ -33,74 +25,55 @@ - - - - - - - - - - - - - - - - + + + + Properties="Configuration=$(Configuration)" /> + + + + + + + + + + - - AssemblyFileVersion\(\"\d+\.\d+\.\d+\.\d+\"\) - AssemblyFileVersion("$(Version)") - - - AssemblyVersion\(\"\d+\.\d+\.\d+\.\d+\"\) - AssemblyVersion("$(Version)") - - - key="servicestack:license".* - key="servicestack:license" value="$([System.IO.File]::ReadAllText($(MSBuildProjectDirectory)/appsettings.license.txt))" /> - - - - version="4\.5[^"]*" - version="$(PackageVersion)" + + <Version>[^<]* + <Version>$(PackageVersion) - + - + + - + - + + - - - - - - + + \ No newline at end of file diff --git a/build/copy.bat b/build/copy.bat deleted file mode 100644 index f59370af..00000000 --- a/build/copy.bat +++ /dev/null @@ -1,7 +0,0 @@ -REM SET BUILD=Debug -SET BUILD=Release - -COPY ..\src\ServiceStack.Redis\bin\%BUILD%\ServiceStack.Redis.* ..\NuGet\lib\net40 - -COPY ..\src\ServiceStack.Redis\bin\%BUILD%\ServiceStack.Redis.* ..\..\ServiceStack\lib -COPY ..\src\ServiceStack.Redis\bin\Signed\ServiceStack.Redis.* ..\..\ServiceStack\lib\signed diff --git a/lib/ServiceStack.Client.dll b/lib/ServiceStack.Client.dll deleted file mode 100644 index 2da412c7..00000000 Binary files a/lib/ServiceStack.Client.dll and /dev/null differ diff --git a/lib/ServiceStack.Client.xml b/lib/ServiceStack.Client.xml deleted file mode 100644 index 10ce7eee..00000000 --- a/lib/ServiceStack.Client.xml +++ /dev/null @@ -1,374 +0,0 @@ - - - - ServiceStack.Client - - - - Need to provide async request options - http://msdn.microsoft.com/en-us/library/86wf6409(VS.71).aspx - - - - The request filter is called before any request. 
- This request filter is executed globally. - - - - - The response action is called once the server response is available. - It will allow you to access raw response information. - This response action is executed globally. - Note that you should NOT consume the response stream as this is handled by ServiceStack - - - - - Called before request resend, when the initial request required authentication - - - - - The request filter is called before any request. - This request filter only works with the instance where it was set (not global). - - - - - The response action is called once the server response is available. - It will allow you to access raw response information. - Note that you should NOT consume the response stream as this is handled by ServiceStack - - - - - Returns the next message from queueName or null if no message - - - - - - - Generic Proxy for service calls. - - The service Contract - - - - Returns the transparent proxy for the service call - - - - Need to provide async request options - http://msdn.microsoft.com/en-us/library/86wf6409(VS.71).aspx - - - - The request filter is called before any request. - This request filter is executed globally. - - - - - The response action is called once the server response is available. - It will allow you to access raw response information. - This response action is executed globally. - Note that you should NOT consume the response stream as this is handled by ServiceStack - - - - - Sets all baseUri properties, using the Format property for the SyncReplyBaseUri and AsyncOneWayBaseUri - - Base URI of the service - - - - Whether to Accept Gzip,Deflate Content-Encoding and to auto decompress responses - - - - - The user name for basic authentication - - - - - The password for basic authentication - - - - - Sets the username and the password for basic authentication. - - - - - Determines if the basic auth header should be sent with every request. - By default, the basic auth header is only sent when "401 Unauthorized" is returned. - - - - - Specifies if cookies should be stored - - - - - Called by Send method if an exception occurs, for instance a System.Net.WebException because the server - returned an HTTP error code. Override if you want to handle specific exceptions or always want to parse the - response to a custom ErrorResponse DTO type instead of ServiceStack's ErrorResponse class. In case ex is a - System.Net.WebException, do not use - createWebRequest/getResponse/HandleResponse<TResponse> to parse the response - because that will result in the same exception again. Use - ThrowWebServiceException<YourErrorResponseType> to parse the response and to throw a - WebServiceException containing the parsed DTO. Then override Send to handle that exception. - - - - - Gets the collection of headers to be added to outgoing requests. - - - - - Whether to execute async callbacks on the same Synchronization Context it was called from. - - - - - Gets or sets authentication information for the request. - Warning: It's recommened to use and for basic auth. - This property is only used for IIS level authentication. - - - - - Called before request resend, when the initial request required authentication - - - - - The request filter is called before any request. - This request filter only works with the instance where it was set (not global). - - - - - The response action is called once the server response is available. - It will allow you to access raw response information. 
- Note that you should NOT consume the response stream as this is handled by ServiceStack - - - - - Creates the error response from the values provided. - - If the errorCode is empty it will use the first validation error code, - if there is none it will throw an error. - - The error code. - The error message. - The validation errors. - - - - - Default MaxStringContentLength is 8k, and throws an exception when reached - - - - - Serializer cache of delegates required to create a type from a string map (e.g. for REST urls) - - - - - Gets the namespace from an attribute marked on the type's definition - - - Namespace of type - - - - Specifies if cookies should be stored - - - - - Compresses the specified text using the default compression method: Deflate - - The text. - Type of the compression. - - - - - Decompresses the specified gz buffer using the default compression method: Inflate - - The gz buffer. - Type of the compression. - - - - - Donated by Ivan Korneliuk from his post: - http://korneliuk.blogspot.com/2012/08/servicestack-reusing-dtos.html - - Modified to only allow using routes matching the supplied HTTP Verb - - - - - Generate a url from a Request DTO. Pretty URL generation require Routes to be defined using `[Route]` on the Request DTO - - - - - The exception which is thrown when a validation error occurred. - This validation is serialized in a extra clean and human-readable way by ServiceStack. - - - - - Used if we need to serialize this exception to XML - - - - - - Returns the first error code - - The error code. - - - - Encapsulates a validation result. - - - - - Constructs a new ValidationResult - - - - - Constructs a new ValidationResult - - A list of validation results - - - - Initializes a new instance of the class. - - The errors. - The success code. - The error code. - - - - Merge errors from another - - - - - - Gets or sets the success code. - - The success code. - - - - Gets or sets the error code. - - The error code. - - - - Gets or sets the success message. - - The success message. - - - - Gets or sets the error message. - - The error message. - - - - The errors generated by the validation. - - - - - Returns True if the validation was successful (errors list is empty). - - - - - Adds the singleton instance of to an endpoint on the client. - - - Based on http://megakemp.wordpress.com/2009/02/06/managing-shared-cookies-in-wcf/ - - - - - Adds the singleton of the class to the client endpoint's message inspectors. - - The endpoint that is to be customized. - The client runtime to be customized. - - - - Maintains a copy of the cookies contained in the incoming HTTP response received from any service - and appends it to all outgoing HTTP requests. - - - This class effectively allows to send any received HTTP cookies to different services, - reproducing the same functionality available in ASMX Web Services proxies with the class. - Based on http://megakemp.wordpress.com/2009/02/06/managing-shared-cookies-in-wcf/ - - - - - Initializes a new instance of the class. - - - - - Inspects a message after a reply message is received but prior to passing it back to the client application. - - The message to be transformed into types and handed back to the client application. - Correlation state data. - - - - Inspects a message before a request message is sent to a service. - - The message to be sent to the service. - The client object channel. - - Null since no message correlation is used. - - - - - Gets the singleton instance. 
- - - - - Naming convention for the request's Response DTO - - - - - Shortcut to get the ResponseStatus whether it's bare or inside a IHttpResult - - - - - - diff --git a/lib/ServiceStack.Common.dll b/lib/ServiceStack.Common.dll deleted file mode 100755 index d6e928f2..00000000 Binary files a/lib/ServiceStack.Common.dll and /dev/null differ diff --git a/lib/ServiceStack.Common.xml b/lib/ServiceStack.Common.xml deleted file mode 100644 index abc4ab97..00000000 --- a/lib/ServiceStack.Common.xml +++ /dev/null @@ -1,410 +0,0 @@ - - - - ServiceStack.Common - - - - - Categories of sql statements. - - - - - Unknown - - - - - DML statements that alter database state, e.g. INSERT, UPDATE - - - - - Statements that return a single record - - - - - Statements that iterate over a result set - - - - - A callback for ProfiledDbConnection and family - - - - - Called when a command starts executing - - - - - - - Called when a reader finishes executing - - - - - - - - Called when a reader is done iterating through the data - - - - - - Called when an error happens during execution of a command - - - - - - - - True if the profiler instance is active - - - - - Wraps a database connection, allowing sql execution timings to be collected when a session is started. - - - - - Returns a new that wraps , - providing query execution profiling. If profiler is null, no profiling will occur. - - Your provider-specific flavor of connection, e.g. SqlConnection, OracleConnection - The currently started or null. - Determines whether the ProfiledDbConnection will dispose the underlying connection. - - - - The underlying, real database connection to your db provider. - - - - - The current profiler instance; could be null. - - - - - The raw connection this is wrapping - - - - - Wrapper for a db provider factory to enable profiling - - - - - Every provider factory must have an Instance public field - - - - - Allow to re-init the provider factory. - - - - - - - proxy - - - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - Return T[0] when enumerable is null, safe to use in enumerations like foreach - - - - - Gets the textual description of the enum if it has one. e.g. - - - enum UserColors - { - [Description("Bright Red")] - BrightRed - } - UserColors.BrightRed.ToDescription(); - - - - - - - - Creates a Console Logger, that logs all messages to: System.Console - - Made public so its testable - - - - - Default logger is to Console.WriteLine - - Made public so its testable - - - - - Initializes a new instance of the class. - - - - - Initializes a new instance of the class. - - - - - Logs the specified message. - - - - - Logs the format. - - - - - Logs the specified message. - - - - - Creates a Debug Logger, that logs all messages to: System.Diagnostics.Debug - - Made public so its testable - - - - - Default logger is to System.Diagnostics.Debug.WriteLine - - Made public so its testable - - - - - Initializes a new instance of the class. - - - - - Initializes a new instance of the class. - - - - - Logs the specified message. - - - - - Logs the format. - - - - - Logs the specified message. 
- - - - - Func to get the Strongly-typed field - - - - - Required to cast the return ValueType to an object for caching - - - - - Func to set the Strongly-typed field - - - - - Required to cast the ValueType to an object for caching - - - - - Required to cast the ValueType to an object for caching - - - - - Func to get the Strongly-typed field - - - - - Required to cast the return ValueType to an object for caching - - - - - Func to set the Strongly-typed field - - - - - Required to cast the ValueType to an object for caching - - - - - Required to cast the ValueType to an object for caching - - - - - Useful IPAddressExtensions from: - http://blogs.msdn.com/knom/archive/2008/12/31/ip-address-calculations-with-c-subnetmasks-networks.aspx - - - - - - Gets the ipv4 addresses from all Network Interfaces that have Subnet masks. - - - - - - Gets the ipv6 addresses from all Network Interfaces. - - - - - - Common functionality when creating adapters - - - - - Executes the specified expression. - - - The action. - - - - - Executes the specified action (for void methods). - - The action. - - - - Note: InMemoryLog keeps all logs in memory, so don't use it long running exceptions - - Returns a thread-safe InMemoryLog which you can use while *TESTING* - to provide a detailed analysis of your logs. - - - - - Creates a Unified Resource Name (URN) with the following formats: - - - urn:{TypeName}:{IdFieldValue} e.g. urn:UserSession:1 - - urn:{TypeName}:{IdFieldName}:{IdFieldValue} e.g. urn:UserSession:UserId:1 - - - - - - - Provide the an option for the callee to block until all commands are executed - - - - - - - Invokes the action provided and returns true if no excpetion was thrown. - Otherwise logs the exception and returns false if an exception was thrown. - - The action. 
- - - - - Runs an action for a minimum of runForMs - - What to run - Minimum ms to run for - time elapsed in micro seconds - - - - Returns average microseconds an action takes when run for the specified runForMs - - What to run - How many times to run for each iteration - Minimum ms to run for - - - - - - - diff --git a/lib/ServiceStack.Interfaces.dll b/lib/ServiceStack.Interfaces.dll deleted file mode 100755 index 2d82d288..00000000 Binary files a/lib/ServiceStack.Interfaces.dll and /dev/null differ diff --git a/lib/ServiceStack.Interfaces.dll.mdb b/lib/ServiceStack.Interfaces.dll.mdb deleted file mode 100644 index 7c7b9c57..00000000 Binary files a/lib/ServiceStack.Interfaces.dll.mdb and /dev/null differ diff --git a/lib/ServiceStack.Pcl.Net45.dll b/lib/ServiceStack.Pcl.Net45.dll deleted file mode 100644 index 1d289b87..00000000 Binary files a/lib/ServiceStack.Pcl.Net45.dll and /dev/null differ diff --git a/lib/ServiceStack.Pcl.WinStore.dll b/lib/ServiceStack.Pcl.WinStore.dll deleted file mode 100644 index 51ebd33c..00000000 Binary files a/lib/ServiceStack.Pcl.WinStore.dll and /dev/null differ diff --git a/lib/ServiceStack.Pcl.WinStore.pri b/lib/ServiceStack.Pcl.WinStore.pri deleted file mode 100644 index a3071fe8..00000000 Binary files a/lib/ServiceStack.Pcl.WinStore.pri and /dev/null differ diff --git a/lib/ServiceStack.Text.4.0.0.nupkg b/lib/ServiceStack.Text.4.0.0.nupkg deleted file mode 100644 index 77b86f71..00000000 Binary files a/lib/ServiceStack.Text.4.0.0.nupkg and /dev/null differ diff --git a/lib/ServiceStack.Text.4.0.0.symbols.nupkg b/lib/ServiceStack.Text.4.0.0.symbols.nupkg deleted file mode 100644 index c2210b1a..00000000 Binary files a/lib/ServiceStack.Text.4.0.0.symbols.nupkg and /dev/null differ diff --git a/lib/ServiceStack.Text.XML b/lib/ServiceStack.Text.XML deleted file mode 100644 index a0267c88..00000000 --- a/lib/ServiceStack.Text.XML +++ /dev/null @@ -1,1400 +0,0 @@ - - - - ServiceStack.Text - - - - - Utils to load types - - - - - Find the type from the name supplied - - [typeName] or [typeName, assemblyName] - - - - - The top-most interface of the given type, if any. - - - - - Find type if it exists - - - - The type if it exists - - - - If AlwaysUseUtc is set to true then convert all DateTime to UTC. If PreserveUtc is set to true then UTC dates will not convert to local - - - - - - - Repairs an out-of-spec XML date/time string which incorrectly uses a space instead of a 'T' to separate the date from the time. - These string are occasionally generated by SQLite and can cause errors in OrmLite when reading these columns from the DB. - - The XML date/time string to repair - The repaired string. If no repairs were made, the original string is returned. - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - - Get the type(string) constructor if exists - - The type. - - - - - micro optimizations: using flags instead of value.IndexOfAny(EscapeChars) - - - - - - - Class to hold - - - - - - A fast, standards-based, serialization-issue free DateTime serailizer. - - - - - Determines whether this serializer can create the specified type from a string. - - The type. - - true if this instance [can create from string] the specified type; otherwise, false. - - - - - Parses the specified value. - - The value. - - - - - Deserializes from reader. - - The reader. - - - - - Serializes to string. - - The value. - - - - - Serializes to writer. - - The value. - The writer. 
- - - - Sets which format to use when serializing TimeSpans - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. - - - - - if the is configured - to support web-friendly serialized formats, ie emitting lowercase_underscore_casing for JSON - - - - - Define how property names are mapped during deserialization - - - - - Gets or sets a value indicating if the framework should throw serialization exceptions - or continue regardless of deserialization errors. If the framework - will throw; otherwise, it will parse as many fields as possible. The default is . - - - - - Gets or sets a value indicating if the framework should always convert to UTC format instead of local time. - - - - - Gets or sets a value indicating if the framework should skip automatic conversions. - Dates will be handled literally, any included timezone encoding will be lost and the date will be treaded as DateTimeKind.Local - Utc formatted input will result in DateTimeKind.Utc output. Any input without TZ data will be set DateTimeKind.Unspecified - This will take precedence over other flags like AlwaysUseUtc - JsConfig.DateHandler = DateHandler.ISO8601 should be used when set true for consistent de/serialization. - - - - - Gets or sets a value indicating if the framework should always assume is in UTC format if Kind is Unspecified. - - - - - Gets or sets whether we should append the Utc offset when we serialize Utc dates. Defaults to no. - Only supported for when the JsConfig.DateHandler == JsonDateHandler.TimestampOffset - - - - - Gets or sets a value indicating if unicode symbols should be serialized as "\uXXXX". - - - - - Gets or sets a value indicating if the framework should call an error handler when - an exception happens during the deserialization. - - Parameters have following meaning in order: deserialized entity, property name, parsed value, property type, caught exception. - - - - If set to true, Interface types will be prefered over concrete types when serializing. - - - - - If set to true, Interface types will be prefered over concrete types when serializing. - - - - - Sets the maximum depth to avoid circular dependencies - - - - - Set this to enable your own type construction provider. - This is helpful for integration with IoC containers where you need to call the container constructor. - Return null if you don't know how to construct the type and the parameterless constructor will be used. - - - - - Always emit type info for this type. Takes precedence over ExcludeTypeInfo - - - - - Never emit type info for this type - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. 
- - - - - Define custom serialization fn for BCL Structs - - - - - Opt-in flag to set some Value Types to be treated as a Ref Type - - - - - Whether there is a fn (raw or otherwise) - - - - - Define custom raw serialization fn - - - - - Define custom serialization hook - - - - - Define custom after serialization hook - - - - - Define custom deserialization fn for BCL Structs - - - - - Define custom raw deserialization fn for objects - - - - - Exclude specific properties of this type from being serialized - - - - - The property names on target types must match property names in the JSON source - - - - - The property names on target types may not match the property names in the JSON source - - - - - Uses the xsd format like PT15H10M20S - - - - - Uses the standard .net ToString method of the TimeSpan class - - - - - Get JSON string value converted to T - - - - - Get JSON string value - - - - - Get JSON string value - - - - - Get unescaped string value - - - - - Get unescaped string value - - - - - Write JSON Array, Object, bool or number values as raw string - - - - - Creates an instance of a Type from a string value - - - - - Parses the specified value. - - The value. - - - - - Shortcut escape when we're sure value doesn't contain any escaped chars - - - - - - - Given a character as utf32, returns the equivalent string provided that the character - is legal json. - - - - - - - Micro-optimization keep pre-built char arrays saving a .ToCharArray() + function call (see .net implementation of .Write(string)) - - - - - Searches the string for one or more non-printable characters. - - The string to search. - True if there are any characters that require escaping. False if the value can be written verbatim. - - Micro optimizations: since quote and backslash are the only printable characters requiring escaping, removed previous optimization - (using flags instead of value.IndexOfAny(EscapeChars)) in favor of two equality operations saving both memory and CPU time. - Also slightly reduced code size by re-arranging conditions. - TODO: Possible Linq-only solution requires profiling: return value.Any(c => !c.IsPrintable() || c == QuoteChar || c == EscapeChar); - - - - - Implement the serializer using a more static approach - - - - - - Implement the serializer using a more static approach - - - - - - Pretty Thread-Safe cache class from: - http://code.google.com/p/dapper-dot-net/source/browse/Dapper/SqlMapper.cs - - This is a micro-cache; suitable when the number of terms is controllable (a few hundred, for example), - and strictly append-only; you cannot change existing values. All key matches are on **REFERENCE** - equality. The type is fully thread-safe. - - - - - Represents an individual object, allowing access to members by-name - - - - - Get or Set the value of a named member for the underlying object - - - - - The object represented by this instance - - - - - Use the target types definition of equality - - - - - Obtain the hash of the target object - - - - - Use the target's definition of a string representation - - - - - Wraps an individual object, allowing by-name access to that instance - - - - - Provides by-name member-access to objects of a given type - - - - - Does this type support new instances via a parameterless constructor? 
- - - - - Create a new instance of this type - - - - - Provides a type-specific accessor, allowing by-name access for all objects of that type - - The accessor is cached internally; a pre-existing accessor may be returned - - - - Get or set the value of a named member on the target instance - - - - - Generic implementation of object pooling pattern with predefined pool size limit. The main - purpose is that limited number of frequently used objects can be kept in the pool for - further recycling. - - Notes: - 1) it is not the goal to keep all returned objects. Pool is not meant for storage. If there - is no space in the pool, extra returned objects will be dropped. - - 2) it is implied that if object was obtained from a pool, the caller will return it back in - a relatively short time. Keeping checked out objects for long durations is ok, but - reduces usefulness of pooling. Just new up your own. - - Not returning objects to the pool in not detrimental to the pool's work, but is a bad practice. - Rationale: - If there is no intent for reusing the object, do not use pool - just use "new". - - - - - Not using System.Func{T} because this file is linked into the (debugger) Formatter, - which does not have that type (since it compiles against .NET 2.0). - - - - - Produces an instance. - - - Search strategy is a simple linear probing which is chosen for it cache-friendliness. - Note that Free will try to store recycled objects close to the start thus statistically - reducing how far we will typically search. - - - - - Returns objects to the pool. - - - Search strategy is a simple linear probing which is chosen for it cache-friendliness. - Note that Free will try to store recycled objects close to the start thus statistically - reducing how far we will typically search in Allocate. - - - - - Removes an object from leak tracking. - - This is called when an object is returned to the pool. It may also be explicitly - called if an object allocated from the pool is intentionally not being returned - to the pool. This can be of use with pooled arrays if the consumer wants to - return a larger array to the pool than was originally allocated. - - - - - this is RAII object to automatically release pooled object when its owning pool - - - - - Shared object pool for roslyn - - Use this shared pool if only concern is reducing object allocations. - if perf of an object pool itself is also a concern, use ObjectPool directly. - - For example, if you want to create a million of small objects within a second, - use the ObjectPool directly. it should have much less overhead than using this. - - - - - pool that uses default constructor with 100 elements pooled - - - - - pool that uses default constructor with 20 elements pooled - - - - - pool that uses string as key with StringComparer.OrdinalIgnoreCase as key comparer - - - - - pool that uses string as element with StringComparer.OrdinalIgnoreCase as element comparer - - - - - pool that uses string as element with StringComparer.Ordinal as element comparer - - - - - Used to reduce the # of temporary byte[]s created to satisfy serialization and - other I/O requests - - - - pooled memory : 4K * 512 = 4MB - - - - Reusable StringBuilder ThreadStatic Cache - - - - - Alternative Reusable StringBuilder ThreadStatic Cache - - - - - Reusable StringWriter ThreadStatic Cache - - - - - Alternative Reusable StringWriter ThreadStatic Cache - - - - - Manages pools of RecyclableMemoryStream objects. - - - There are two pools managed in here. 
The small pool contains same-sized buffers that are handed to streams - as they write more data. - - For scenarios that need to call GetBuffer(), the large pool contains buffers of various sizes, all - multiples of LargeBufferMultiple (1 MB by default). They are split by size to avoid overly-wasteful buffer - usage. There should be far fewer 8 MB buffers than 1 MB buffers, for example. - - - - - Generic delegate for handling events without any arguments. - - - - - Delegate for handling large buffer discard reports. - - Reason the buffer was discarded. - - - - Delegate for handling reports of stream size when streams are allocated - - Bytes allocated. - - - - Delegate for handling periodic reporting of memory use statistics. - - Bytes currently in use in the small pool. - Bytes currently free in the small pool. - Bytes currently in use in the large pool. - Bytes currently free in the large pool. - - - - pools[0] = 1x largeBufferMultiple buffers - pools[1] = 2x largeBufferMultiple buffers - etc., up to maximumBufferSize - - - - - Initializes the memory manager with the default block/buffer specifications. - - - - - Initializes the memory manager with the given block requiredSize. - - Size of each block that is pooled. Must be > 0. - Each large buffer will be a multiple of this value. - Buffers larger than this are not pooled - blockSize is not a positive number, or largeBufferMultiple is not a positive number, or maximumBufferSize is less than blockSize. - maximumBufferSize is not a multiple of largeBufferMultiple - - - - The size of each block. It must be set at creation and cannot be changed. - - - - - All buffers are multiples of this number. It must be set at creation and cannot be changed. - - - - - Gets or sets the maximum buffer size. - - Any buffer that is returned to the pool that is larger than this will be - discarded and garbage collected. - - - - Number of bytes in small pool not currently in use - - - - - Number of bytes currently in use by stream from the small pool - - - - - Number of bytes in large pool not currently in use - - - - - Number of bytes currently in use by streams from the large pool - - - - - How many blocks are in the small pool - - - - - How many buffers are in the large pool - - - - - How many bytes of small free blocks to allow before we start dropping - those returned to us. - - - - - How many bytes of large free buffers to allow before we start dropping - those returned to us. - - - - - Maximum stream capacity in bytes. Attempts to set a larger capacity will - result in an exception. - - A value of 0 indicates no limit. - - - - Whether to save callstacks for stream allocations. This can help in debugging. - It should NEVER be turned on generally in production. - - - - - Whether dirty buffers can be immediately returned to the buffer pool. E.g. when GetBuffer() is called on - a stream and creates a single large buffer, if this setting is enabled, the other blocks will be returned - to the buffer pool immediately. - Note when enabling this setting that the user is responsible for ensuring that any buffer previously - retrieved from a stream which is subsequently modified is not used after modification (as it may no longer - be valid). - - - - - Removes and returns a single block from the pool. - - A byte[] array - - - - Returns a buffer of arbitrary size from the large buffer pool. This buffer - will be at least the requiredSize and always be a multiple of largeBufferMultiple. 
- - The minimum length of the buffer - The tag of the stream returning this buffer, for logging if necessary. - A buffer of at least the required size. - - - - Returns the buffer to the large pool - - The buffer to return. - The tag of the stream returning this buffer, for logging if necessary. - buffer is null - buffer.Length is not a multiple of LargeBufferMultiple (it did not originate from this pool) - - - - Returns the blocks to the pool - - Collection of blocks to return to the pool - The tag of the stream returning these blocks, for logging if necessary. - blocks is null - blocks contains buffers that are the wrong size (or null) for this memory manager - - - - Retrieve a new MemoryStream object with no tag and a default initial capacity. - - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and a default initial capacity. - - A tag which can be used to track the source of the stream. - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and at least the given capacity. - - A tag which can be used to track the source of the stream. - The minimum desired capacity for the stream. - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and at least the given capacity, possibly using - a single continugous underlying buffer. - - Retrieving a MemoryStream which provides a single contiguous buffer can be useful in situations - where the initial size is known and it is desirable to avoid copying data between the smaller underlying - buffers to a single large one. This is most helpful when you know that you will always call GetBuffer - on the underlying stream. - A tag which can be used to track the source of the stream. - The minimum desired capacity for the stream. - Whether to attempt to use a single contiguous buffer. - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and with contents copied from the provided - buffer. The provided buffer is not wrapped or used after construction. - - The new stream's position is set to the beginning of the stream when returned. - A tag which can be used to track the source of the stream. - The byte buffer to copy data from. - The offset from the start of the buffer to copy from. - The number of bytes to copy from the buffer. - A MemoryStream. - - - - Triggered when a new block is created. - - - - - Triggered when a new block is created. - - - - - Triggered when a new large buffer is created. - - - - - Triggered when a new stream is created. - - - - - Triggered when a stream is disposed. - - - - - Triggered when a stream is finalized. - - - - - Triggered when a stream is finalized. - - - - - Triggered when a user converts a stream to array. - - - - - Triggered when a large buffer is discarded, along with the reason for the discard. - - - - - Periodically triggered to report usage statistics. - - - - - MemoryStream implementation that deals with pooling and managing memory streams which use potentially large - buffers. - - - This class works in tandem with the RecylableMemoryStreamManager to supply MemoryStream - objects to callers, while avoiding these specific problems: - 1. LOH allocations - since all large buffers are pooled, they will never incur a Gen2 GC - 2. Memory waste - A standard memory stream doubles its size when it runs out of room. This - leads to continual memory growth as each stream approaches the maximum allowed size. - 3. Memory copying - Each time a MemoryStream grows, all the bytes are copied into new buffers. 
- This implementation only copies the bytes when GetBuffer is called. - 4. Memory fragmentation - By using homogeneous buffer sizes, it ensures that blocks of memory - can be easily reused. - - The stream is implemented on top of a series of uniformly-sized blocks. As the stream's length grows, - additional blocks are retrieved from the memory manager. It is these blocks that are pooled, not the stream - object itself. - - The biggest wrinkle in this implementation is when GetBuffer() is called. This requires a single - contiguous buffer. If only a single block is in use, then that block is returned. If multiple blocks - are in use, we retrieve a larger buffer from the memory manager. These large buffers are also pooled, - split by size--they are multiples of a chunk size (1 MB by default). - - Once a large buffer is assigned to the stream the blocks are NEVER again used for this stream. All operations take place on the - large buffer. The large buffer can be replaced by a larger buffer from the pool as needed. All blocks and large buffers - are maintained in the stream until the stream is disposed (unless AggressiveBufferReturn is enabled in the stream manager). - - - - - - All of these blocks must be the same size - - - - - This is only set by GetBuffer() if the necessary buffer is larger than a single block size, or on - construction if the caller immediately requests a single large buffer. - - If this field is non-null, it contains the concatenation of the bytes found in the individual - blocks. Once it is created, this (or a larger) largeBuffer will be used for the life of the stream. - - - - - This list is used to store buffers once they're replaced by something larger. - This is for the cases where you have users of this class that may hold onto the buffers longer - than they should and you want to prevent race conditions which could corrupt the data. - - - - - Unique identifier for this stream across it's entire lifetime - - Object has been disposed - - - - A temporary identifier for the current usage of this stream. - - Object has been disposed - - - - Gets the memory manager being used by this stream. - - Object has been disposed - - - - Callstack of the constructor. It is only set if MemoryManager.GenerateCallStacks is true, - which should only be in debugging situations. - - - - - Callstack of the Dispose call. It is only set if MemoryManager.GenerateCallStacks is true, - which should only be in debugging situations. - - - - - This buffer exists so that WriteByte can forward all of its calls to Write - without creating a new byte[] buffer on every call. - - - - - Allocate a new RecyclableMemoryStream object. - - The memory manager - - - - Allocate a new RecyclableMemoryStream object - - The memory manager - A string identifying this stream for logging and debugging purposes - - - - Allocate a new RecyclableMemoryStream object - - The memory manager - A string identifying this stream for logging and debugging purposes - The initial requested size to prevent future allocations - - - - Allocate a new RecyclableMemoryStream object - - The memory manager - A string identifying this stream for logging and debugging purposes - The initial requested size to prevent future allocations - An initial buffer to use. This buffer will be owned by the stream and returned to the memory manager upon Dispose. - - - - Returns the memory used by this stream back to the pool. 
- - Whether we're disposing (true), or being called by the finalizer (false) - This method is not thread safe and it may not be called more than once. - - - - Equivalent to Dispose - - - - - Gets or sets the capacity - - Capacity is always in multiples of the memory manager's block size, unless - the large buffer is in use. Capacity never decreases during a stream's lifetime. - Explicitly setting the capacity to a lower value than the current value will have no effect. - This is because the buffers are all pooled by chunks and there's little reason to - allow stream truncation. - - Object has been disposed - - - - Gets the number of bytes written to this stream. - - Object has been disposed - - - - Gets the current position in the stream - - Object has been disposed - - - - Whether the stream can currently read - - - - - Whether the stream can currently seek - - - - - Always false - - - - - Whether the stream can currently write - - - - - Returns a single buffer containing the contents of the stream. - The buffer may be longer than the stream length. - - A byte[] buffer - IMPORTANT: Doing a Write() after calling GetBuffer() invalidates the buffer. The old buffer is held onto - until Dispose is called, but the next time GetBuffer() is called, a new buffer from the pool will be required. - Object has been disposed - - - - Returns a new array with a copy of the buffer's contents. You should almost certainly be using GetBuffer combined with the Length to - access the bytes in this stream. Calling ToArray will destroy the benefits of pooled buffers, but it is included - for the sake of completeness. - - Object has been disposed - - - - Reads from the current position into the provided buffer - - Destination buffer - Offset into buffer at which to start placing the read bytes. - Number of bytes to read. - The number of bytes read - buffer is null - offset or count is less than 0 - offset subtracted from the buffer length is less than count - Object has been disposed - - - - Writes the buffer to the stream - - Source buffer - Start position - Number of bytes to write - buffer is null - offset or count is negative - buffer.Length - offset is not less than count - Object has been disposed - - - - Returns a useful string for debugging. This should not normally be called in actual production code. - - - - - Writes a single byte to the current position in the stream. - - byte value to write - Object has been disposed - - - - Reads a single byte from the current position in the stream. - - The byte at the current position, or -1 if the position is at the end of the stream. - Object has been disposed - - - - Sets the length of the stream - - value is negative or larger than MaxStreamLength - Object has been disposed - - - - Sets the position to the offset from the seek location - - How many bytes to move - From where - The new position - Object has been disposed - offset is larger than MaxStreamLength - Invalid seek origin - Attempt to set negative position - - - - Synchronously writes this stream's bytes to the parameter stream. - - Destination stream - Important: This does a synchronous write, which may not be desired in some situations - - - - Release the large buffer (either stores it for eventual release or returns it immediately). - - - - - A class to allow the conversion of doubles to string representations of - their exact decimal values. The implementation aims for readability over - efficiency. 
- - Courtesy of @JonSkeet - http://www.yoda.arachsys.com/csharp/DoubleConverter.cs - - - - - - - - How many digits are *after* the decimal point - - - - - Constructs an arbitrary decimal expansion from the given long. - The long must not be negative. - - - - - Multiplies the current expansion by the given amount, which should - only be 2 or 5. - - - - - Shifts the decimal point; a negative value makes - the decimal expansion bigger (as fewer digits come after the - decimal place) and a positive value makes the decimal - expansion smaller. - - - - - Removes leading/trailing zeroes from the expansion. - - - - - Converts the value to a proper decimal string representation. - - - - - Creates an instance of a Type from a string value - - - - - Determines whether the specified type is convertible from string. - - The type. - - true if the specified type is convertible from string; otherwise, false. - - - - - Parses the specified value. - - The value. - - - - - Parses the specified type. - - The type. - The value. - - - - - Useful extension method to get the Dictionary[string,string] representation of any POCO type. - - - - - - Recursively prints the contents of any POCO object in a human-friendly, readable format - - - - - - Print Dump to Console.WriteLine - - - - - Print string.Format to Console.WriteLine - - - - - Parses the specified value. - - The value. - - - - - Populate an object with Example data. - - - - - - - Populates the object with example data. - - - Tracks how deeply nested we are - - - - - Public Code API to register commercial license for ServiceStack. - - - - - Internal Utilities to verify licensing - - - - - Maps the path of a file in the context of a VS project - - the relative path - the absolute path - Assumes static content is two directories above the /bin/ directory, - eg. in a unit test scenario the assembly would be in /bin/Debug/. - - - - Maps the path of a file in a self-hosted scenario - - the relative path - the absolute path - Assumes static content is copied to /bin/ folder with the assemblies - - - - Maps the path of a file in an Asp.Net hosted scenario - - the relative path - the absolute path - Assumes static content is in the parent folder of the /bin/ directory - - - - Implement the serializer using a more static approach - - - - - - Creates a new instance of type. - First looks at JsConfig.ModelFactory before falling back to CreateInstance - - - - - Creates a new instance of type. - First looks at JsConfig.ModelFactory before falling back to CreateInstance - - - - - Creates a new instance from the default constructor of type - - - - - Add a Property attribute at runtime. - Not threadsafe, should only add attributes on Startup. - - - - - Add a Property attribute at runtime. - Not threadsafe, should only add attributes on Startup. - - - - - @jonskeet: Collection of utility methods which operate on streams. - r285, February 26th 2009: http://www.yoda.arachsys.com/csharp/miscutil/ - - - - - Reads the given stream up to the end, returning the data as a byte - array. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer size. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer for transferring data. Note that the - current contents of the buffer is ignored, so the buffer needn't - be cleared beforehand. - - - - - Copies all the data from one stream into another. 
- - - - - Copies all the data from one stream into another, using a buffer - of the given size. - - - - - Copies all the data from one stream into another, using the given - buffer for transferring data. Note that the current contents of - the buffer is ignored, so the buffer needn't be cleared beforehand. - - - - - Reads exactly the given number of bytes from the specified stream. - If the end of the stream is reached before the specified amount - of data is read, an exception is thrown. - - - - - Reads into a buffer, filling it completely. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Same as ReadExactly, but without the argument checks. - - - - - Converts from base: 0 - 62 - - The source. - From. - To. - - - - - Skip the encoding process for 'safe strings' - - - - - - diff --git a/lib/ServiceStack.Text.dll b/lib/ServiceStack.Text.dll deleted file mode 100755 index c38d2879..00000000 Binary files a/lib/ServiceStack.Text.dll and /dev/null differ diff --git a/lib/netcore/ServiceStack.Client.deps.json b/lib/netcore/ServiceStack.Client.deps.json deleted file mode 100755 index 77ebab45..00000000 --- a/lib/netcore/ServiceStack.Client.deps.json +++ /dev/null @@ -1,569 +0,0 @@ -{ - "runtimeTarget": { - "name": ".NETStandard,Version=v1.1/", - "signature": "c2ca356217873e400c28ec0f561ccb6ded04b221" - }, - "compilationOptions": {}, - "targets": { - ".NETStandard,Version=v1.1": {}, - ".NETStandard,Version=v1.1/": { - "ServiceStack.Client/1.0.0": { - "dependencies": { - "System.Net.Requests": "4.0.11", - "System.ServiceModel.Primitives": "4.1.0", - "System.Xml.XmlSerializer": "4.0.11", - "NETStandard.Library": "1.6.0", - "ServiceStack.Interfaces": "1.0.0", - "ServiceStack.Text": "1.0.0" - }, - "runtime": { - "ServiceStack.Client.dll": {} - } - }, - "Microsoft.NETCore.Platforms/1.0.1": {}, - "Microsoft.NETCore.Targets/1.0.1": {}, - "NETStandard.Library/1.6.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Collections": "4.0.11", - "System.Collections.Concurrent": "4.0.12", - "System.Diagnostics.Debug": "4.0.11", - "System.Diagnostics.Tools": "4.0.1", - "System.Diagnostics.Tracing": "4.1.0", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.IO.Compression": "4.1.0", - "System.Linq": "4.1.0", - "System.Linq.Expressions": "4.1.0", - "System.Net.Http": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Reflection.Extensions": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Runtime.InteropServices.RuntimeInformation": "4.0.0", - "System.Runtime.Numerics": "4.0.1", - "System.Text.Encoding": "4.0.11", - "System.Text.Encoding.Extensions": "4.0.11", - "System.Text.RegularExpressions": "4.1.0", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11", - "System.Xml.ReaderWriter": "4.0.11", - "System.Xml.XDocument": "4.0.11" - } - }, - "runtime.native.System/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "System.Collections/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - 
"Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Collections.Concurrent/4.0.12": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.ComponentModel.EventBasedAsync/4.0.11": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Debug/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Tools/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Tracing/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Globalization/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.IO/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.IO.Compression/4.1.0": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11" - } - }, - "System.Linq/4.1.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Runtime": "4.1.0" - } - }, - "System.Linq.Expressions/4.1.0": { - "dependencies": { - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Net.Http/4.1.0": { - "dependencies": { - "System.IO": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Net.Primitives/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Net.Requests/4.0.11": { - "dependencies": { - "System.IO": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.ObjectModel/4.0.12": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Reflection/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.IO": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Extensions/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Primitives/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Resources.ResourceManager/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Globalization": "4.0.11", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "System.Runtime.Extensions/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - 
"System.Runtime.InteropServices/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.InteropServices.RuntimeInformation/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Reflection": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Threading": "4.0.11", - "runtime.native.System": "4.0.0" - } - }, - "System.Runtime.Numerics/4.0.1": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.Serialization.Primitives/4.1.1": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.Serialization.Xml/4.1.1": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Xml.ReaderWriter": "4.0.11" - } - }, - "System.Security.Principal/4.0.1": { - "dependencies": { - "System.Runtime": "4.1.0" - }, - "runtime": { - "lib/netstandard1.0/System.Security.Principal.dll": {} - } - }, - "System.ServiceModel.Primitives/4.1.0": { - "dependencies": { - "System.ComponentModel.EventBasedAsync": "4.0.11", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0", - "System.Runtime.Serialization.Primitives": "4.1.1", - "System.Runtime.Serialization.Xml": "4.1.1", - "System.Security.Principal": "4.0.1", - "System.Text.Encoding": "4.0.11", - "System.Threading": "4.0.11", - "System.Xml.ReaderWriter": "4.0.11" - } - }, - "System.Text.Encoding/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Text.Encoding.Extensions/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11" - } - }, - "System.Text.RegularExpressions/4.1.0": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Threading/4.0.11": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Threading.Tasks/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Xml.ReaderWriter/4.0.11": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Xml.XDocument/4.0.11": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Xml.ReaderWriter": "4.0.11" - } - }, - "System.Xml.XmlSerializer/4.0.11": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Xml.ReaderWriter": "4.0.11" - } - }, - "ServiceStack.Interfaces/1.0.0": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Runtime.Serialization.Primitives": "4.1.1", - "NETStandard.Library": "1.6.0" - }, - "runtime": { - "ServiceStack.Interfaces.dll": {} - } - }, - "ServiceStack.Text/1.0.0": { - "dependencies": { - "NETStandard.Library": "1.6.0" - }, - "runtime": { - "ServiceStack.Text.dll": {} - } - } - } - }, - "libraries": { - "ServiceStack.Client/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": 
"" - }, - "Microsoft.NETCore.Platforms/1.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-Qkf0b5jVmurSJyGts1NQ6KCrw/ykB+uRSyiZzxNIz0hKJ59drPSY+8Rb8/nXmN3Ar8sO0VL/+91VjlBjbDo+Bw==" - }, - "Microsoft.NETCore.Targets/1.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-woSgWTniX+PUs0UfTvRyTzZXCx1sbhn0/oeRMNnLPNywMhc7lJWzJwhoK4QdQvuMtefSZXvwEencBVmULSY11A==" - }, - "NETStandard.Library/1.6.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-1f1f5BElDr9jmgvGIFRFvUTsZK7omNVoVTrfIiHNmN6VHUPQwj6jmiJbRwTZKU6sR/qLjz9bpVKdhMmRcvd7Yg==" - }, - "runtime.native.System/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-dYvT9BZVAXnQDczzkLR7OLqHtx0P8C2u7yPMT0A9lJiaXizeEr0Sq8KGHtWw0vrMGJIhC2tEDnk/rEmix7xvFA==" - }, - "System.Collections/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-H1xpPt68Ybp+x4vziFVydxl8VqCxplIwQQjUKTLd7teWJ3qyLiAfctntjydePapvxV6LWkSv+w83wlptjjzSGQ==" - }, - "System.Collections.Concurrent/4.0.12": { - "type": "package", - "serviceable": true, - "sha512": "sha512-CVwx4AkvIvfaEZL749N/YetFNOpY0gFSaB1YUDvrMsD/spm44Dv6EkRa479RmvL+3CSH65mfjTbOlAFcRy0qzg==" - }, - "System.ComponentModel.EventBasedAsync/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-Z7SO6vvQIR84daPE4uhaNdef9CjgjDMGYkas8epUhf0U3WGuaGgZ0Mm4QuNycMdbHUY8KEdZrtgxonkAiJaAlA==" - }, - "System.Diagnostics.Debug/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-D+qY0fEVrB0HAgaeqkHyl+lw0j0T7ml41CFMGhDFAyuR70DNUEt0uX9bnZ8NFmgW9OwH4bo6p6mOO5+oEBiIaw==" - }, - "System.Diagnostics.Tools/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-50B15ESJFPA9k5Hg59AqYnPJOBxwYde88pu1DeXkQkJUeUercvv4WEAebXTEcI5pzX5gwbTAyjHqVg6wY6ipzQ==" - }, - "System.Diagnostics.Tracing/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-/GYpoJLNumZAaRu8phpTRdjvSBqJOmefQOOEktaJFUG1bj4IHRXhiyOC1avoVSWBPL7iCJaOBeHxnuv11IGbVw==" - }, - "System.Globalization/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-PhFXoTqNzq85B/VIKl0fRlBazZwaGfhi3zuX9hbjqJwoOVbzUOXbtu39I/RmekzFBA/1crc9M5eZBpIufCt0uw==" - }, - "System.IO/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-v8KrK9Bst0NVG0T6y74495lxx0rMJP2hsxL/wgBo6TXEyc0/RwU3UIziiXsywpWYI4b7g2ZaOmw0JojL/mj0kw==" - }, - "System.IO.Compression/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-s1TqMzqweaeRh4cj0ziwAOkfeH/9lDFz23r7WPp8sE7aKqB5XQyd/22L1WuCVHxp38TjsK4hcffkFwaBqXdjEg==" - }, - "System.Linq/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-9YthdfSFmMYPZdpW6aHMVYfg0+/jwkSfeC1aBlTCbJEF1NYYufMBLM5sta+hB0hYKnSDNVwjrgPr/jnTZmUgHg==" - }, - "System.Linq.Expressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-GhJqkXNnPKOlqg7kM/jt3AhEcMSeo1zhj5n5QN+sgjJOJ47ZdGcOMTcgg9MTWBUfg+zmp8FhctkGVMVQ80WF9A==" - }, - "System.Net.Http/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-a9U5YTufuVBBcEZc5ROTZe+J0XLSEDwLJ1/8q1Ox8t6ge5nq03GyeVU0vmjsjUqIj7MhMfiNbGWzUlcluzdFUg==" - }, - "System.Net.Primitives/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-J7RRG8BSh3V5A/mrEbOMjOPhYGCDGP8RMAIUY9kV6Mn0VohVuFTQpcReQ0ysF0L3KTnrhQAZ+oYOW0nBbatH1Q==" - }, - "System.Net.Requests/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-lUMwAjATj4cOk11QkBe8TyOhlQqrgxwW44z1bQjUZtHtHSyWBKqxGa7m+JOcOBXHBRkAskjcdKl/RL8rd0lTJw==" - }, - "System.ObjectModel/4.0.12": { - 
"type": "package", - "serviceable": true, - "sha512": "sha512-YfRHwBSdp7q4G7wLz8zfQnEnF7qxIYQLlpvsEshUmCYy1h/VvVQMqFkPnmr2xtlNv/bWRELJYdn2Ne9TaE4CfQ==" - }, - "System.Reflection/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-5kGUF30YxHiqTy/vztTk5XGiEy7/Kd21oA2RmK3SVo+vrwcuInQx+BAvC5+torQNKv9UStWQpOW8T6MhTqBODg==" - }, - "System.Reflection.Extensions/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-pbu4mxs9FUfcJfFCiMetbwKU0qXoUQGaAcTcxRVv8+ZKrsrhuIzLdiQ/9mnMe3p1jHvTZ9PTvMV351NUwZ2/iA==" - }, - "System.Reflection.Primitives/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-5zUJswVZMUTN/7ge1y2axR9k76fgfM6aBqVNnYMiPa+V2v1xQf5nBfuAxWizqox6JtJFKtAcn9JsV1Fkf0dzpg==" - }, - "System.Resources.ResourceManager/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-i0tRoia6devfLZxJ6X5iMb3htkte0MsPbWPhyNNeFtsbITeqvv2xzlT9Q8oc7XRYkeBwXdPSk7sYwM3AD1kU1g==" - }, - "System.Runtime/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-mAEjfjsdtet4fpsr7IUHTq0tSMVdaML22AwowcG2Sz3wd+Hx3XW9ZB30phZ3VkFUo3dptR18rag17mzx3Fus4Q==" - }, - "System.Runtime.Extensions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-KLm307+dNR5U0zs8zwyGFoYjfgQAjzsYurVi8QUmQQFZj0G4TUp+pFckkBad/JzMp0eCDjwCssvY+vrDuwovkQ==" - }, - "System.Runtime.InteropServices/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-LtfkuvdefkX+I0QDAQxeSaOc+J9jbl9mfcUvsY1E1zM5CWaW1DnDYNP/TIi01mgBvcbWv6EM/R2g8MuEh1xWvQ==" - }, - "System.Runtime.InteropServices.RuntimeInformation/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-nzu/g3sMbDThmr0a9HqyNT768clFzQmfFeluH315SWY+2jBMqYX+a1rHdBuQbZZ0EteBt5daX3ZxSAC2bzUI4Q==" - }, - "System.Runtime.Numerics/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-qLInCPqxB34+VV3SK1UVsUjGAGXYbgxMuCvg9qUqd6+PDyPF7olMUs7CmvuyTNT4tndYzsUW/vbiF97+DjRemA==" - }, - "System.Runtime.Serialization.Primitives/4.1.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-HZ6Du5QrTG8MNJbf4e4qMO3JRAkIboGT5Fk804uZtg3Gq516S7hAqTm2UZKUHa7/6HUGdVy3AqMQKbns06G/cg==" - }, - "System.Runtime.Serialization.Xml/4.1.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-yqfKHkWUAdI0hdDIdD9KDzluKtZ8IIqLF3O7xIZlt6UTs1bOvFRpCvRTvGQva3Ak/ZM9/nq9IHBJ1tC4Ybcrjg==" - }, - "System.Security.Principal/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-C/+csurhHcDRfS5TcNIk/5ajY6Ew6KGXUlKDoJ//MP6LN/nCGYnqJmZXoYjVqXE8TafcNO31nbjXwbJIlX38Kg==" - }, - "System.ServiceModel.Primitives/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-Kd65HOn/5pL9xtCUkSL8xVqpqBUYy9tsfo0qe/MTTzApY8WQ+6i4I2ts++M+m4vbOanCoEsjjUj26P6C6ilQjQ==" - }, - "System.Text.Encoding/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-NZstDbRv2D+HpaOnG9WBa/NimW5/FNAQeUz40/Hps/8XG/HxaMtwszt7m8RBNbzGVU5UtWCLlE2LPQoPNeGAPQ==" - }, - "System.Text.Encoding.Extensions/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-QBDnhgUt/2hwDAolf6Nw0PO/nPrynar2Ri8C92mH3iu5i/6S3i2U4RCFrrUa06RX2Bqo9FpYWT+gPfk1J8GbJA==" - }, - "System.Text.RegularExpressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-RJ2+BU99xwhEstibq07zhGTrCF33t9aKnT558MKRKe8PBNA4NnyKccuJ07xPSY7r8BDDD7zd/1/+jlb9ZoV9tA==" - }, - "System.Threading/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-Mh2NHuUfYze/L//e26YcLzptpHU0ieFQ/Qi62OZ4/gtaP96Z46F8r5qU7BJrlsK/xDbZ3kTRToItkfeG8CcNzg==" - }, - "System.Threading.Tasks/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-GNzFpzEuRu8GVzkz0BqF3x1GWVJctyv0bUOcoXPJ1IKS5eUmqJS0AXe3GA176BS1s45HHpV7Bfgf6/rKknN/2g==" - }, - "System.Xml.ReaderWriter/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-Meze+GGq2klys0eHZUyMEi8L7ECppJveBG8dveSUhVSqxjhrTUWfzL1fPxoo6wd9sKUHqhPKUjKSJ09EPHZEmw==" - }, - "System.Xml.XDocument/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-Oa+Vu3lXK4jhOD/vP3FoC7aHYP0zEYnZ0PZa6aZyBxQGVv+tc57hY1TqiKlyB10sBrH89WOPHHCpYzYVztxpzA==" - }, - "System.Xml.XmlSerializer/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-FrazwwqfIXTfq23mfv4zH+BjqkSFNaNFBtjzu3I9NRmG8EELYyrv/fJnttCIwRMFRR/YKXF1hmsMmMEnl55HGw==" - }, - "ServiceStack.Interfaces/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": "" - }, - "ServiceStack.Text/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": "" - } - } -} \ No newline at end of file diff --git a/lib/netcore/ServiceStack.Client.dll b/lib/netcore/ServiceStack.Client.dll deleted file mode 100755 index 6b7614ef..00000000 Binary files a/lib/netcore/ServiceStack.Client.dll and /dev/null differ diff --git a/lib/netcore/ServiceStack.Client.pdb b/lib/netcore/ServiceStack.Client.pdb deleted file mode 100755 index 412980c1..00000000 Binary files a/lib/netcore/ServiceStack.Client.pdb and /dev/null differ diff --git a/lib/netcore/ServiceStack.Common.deps.json b/lib/netcore/ServiceStack.Common.deps.json deleted file mode 100644 index 88aa7d2a..00000000 --- a/lib/netcore/ServiceStack.Common.deps.json +++ /dev/null @@ -1,1137 +0,0 @@ -{ - "runtimeTarget": { - "name": ".NETStandard,Version=v1.3/", - "signature": "f6bdf0627409bf8c8c34a3f2bff1c384ea00737d" - }, - "compilationOptions": {}, - "targets": { - ".NETStandard,Version=v1.3": {}, - ".NETStandard,Version=v1.3/": { - "ServiceStack.Common/1.0.0": { - "dependencies": { - "System.Dynamic.Runtime": "4.0.11", - "System.Data.Common": "4.1.0", - "System.ComponentModel.Primitives": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Net.NetworkInformation": "4.1.0", - "System.Net.Security": "4.0.0", - "NETStandard.Library": "1.6.0", - "ServiceStack.Interfaces": "1.0.0", - "ServiceStack.Text": "1.0.0" - }, - "runtime": { - "ServiceStack.Common.dll": {} - } - }, - "Microsoft.NETCore.Platforms/1.0.1": {}, - "Microsoft.NETCore.Targets/1.0.1": {}, - "Microsoft.Win32.Primitives/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "NETStandard.Library/1.6.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.Win32.Primitives": "4.0.1", - "System.AppContext": "4.1.0", - "System.Collections": "4.0.11", - "System.Collections.Concurrent": "4.0.12", - "System.Console": "4.0.0", - "System.Diagnostics.Debug": "4.0.11", - "System.Diagnostics.Tools": "4.0.1", - "System.Diagnostics.Tracing": "4.1.0", - "System.Globalization": "4.0.11", - "System.Globalization.Calendars": "4.0.1", - "System.IO": "4.1.0", - "System.IO.Compression": "4.1.0", - "System.IO.Compression.ZipFile": "4.0.1", - "System.IO.FileSystem": "4.0.1", - "System.IO.FileSystem.Primitives": "4.0.1", - "System.Linq": "4.1.0", - "System.Linq.Expressions": "4.1.0", - "System.Net.Http": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Net.Sockets": "4.1.0", - 
"System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Reflection.Extensions": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Runtime.InteropServices": "4.1.0", - "System.Runtime.InteropServices.RuntimeInformation": "4.0.0", - "System.Runtime.Numerics": "4.0.1", - "System.Security.Cryptography.Algorithms": "4.2.0", - "System.Security.Cryptography.Encoding": "4.0.0", - "System.Security.Cryptography.Primitives": "4.0.0", - "System.Security.Cryptography.X509Certificates": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Text.Encoding.Extensions": "4.0.11", - "System.Text.RegularExpressions": "4.1.0", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11", - "System.Threading.Timer": "4.0.1", - "System.Xml.ReaderWriter": "4.0.11", - "System.Xml.XDocument": "4.0.11" - } - }, - "runtime.native.System/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "runtime.native.System.IO.Compression/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "runtime.native.System.Security.Cryptography/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "System.AppContext/4.1.0": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Buffers/4.0.0": { - "dependencies": { - "System.Diagnostics.Debug": "4.0.11", - "System.Diagnostics.Tracing": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Threading": "4.0.11" - }, - "runtime": { - "lib/netstandard1.1/System.Buffers.dll": {} - } - }, - "System.Collections/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Collections.Concurrent/4.0.12": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Diagnostics.Tracing": "4.1.0", - "System.Globalization": "4.0.11", - "System.Reflection": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11" - }, - "runtime": { - "lib/netstandard1.3/System.Collections.Concurrent.dll": {} - } - }, - "System.ComponentModel/4.0.1": { - "dependencies": { - "System.Runtime": "4.1.0" - }, - "runtime": { - "lib/netstandard1.3/System.ComponentModel.dll": {} - } - }, - "System.ComponentModel.Primitives/4.1.0": { - "dependencies": { - "System.ComponentModel": "4.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0" - }, - "runtime": { - "lib/netstandard1.0/System.ComponentModel.Primitives.dll": {} - } - }, - "System.Console/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11" - } - }, - "System.Data.Common/4.1.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Text.RegularExpressions": "4.1.0", - "System.Threading.Tasks": 
"4.0.11" - }, - "runtime": { - "lib/netstandard1.2/System.Data.Common.dll": {} - } - }, - "System.Diagnostics.Debug/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.DiagnosticSource/4.0.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Tracing": "4.1.0", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0", - "System.Threading": "4.0.11" - }, - "runtime": { - "lib/netstandard1.3/System.Diagnostics.DiagnosticSource.dll": {} - } - }, - "System.Diagnostics.Tools/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Tracing/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Dynamic.Runtime/4.0.11": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Globalization": "4.0.11", - "System.Linq": "4.1.0", - "System.Linq.Expressions": "4.1.0", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Reflection.Emit": "4.0.1", - "System.Reflection.Emit.ILGeneration": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Reflection.TypeExtensions": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Threading": "4.0.11" - }, - "runtime": { - "lib/netstandard1.3/System.Dynamic.Runtime.dll": {} - } - }, - "System.Globalization/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Globalization.Calendars/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Globalization": "4.0.11", - "System.Runtime": "4.1.0" - } - }, - "System.IO/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.IO.Compression/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.IO": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Runtime.InteropServices": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11", - "runtime.native.System": "4.0.0", - "runtime.native.System.IO.Compression": "4.1.0" - } - }, - "System.IO.Compression.ZipFile/4.0.1": { - "dependencies": { - "System.Buffers": "4.0.0", - "System.IO": "4.1.0", - "System.IO.Compression": "4.1.0", - "System.IO.FileSystem": "4.0.1", - "System.IO.FileSystem.Primitives": "4.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Text.Encoding": "4.0.11" - }, - "runtime": { - "lib/netstandard1.3/System.IO.Compression.ZipFile.dll": {} - } - }, - "System.IO.FileSystem/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.IO": "4.1.0", - "System.IO.FileSystem.Primitives": 
"4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.IO.FileSystem.Primitives/4.0.1": { - "dependencies": { - "System.Runtime": "4.1.0" - }, - "runtime": { - "lib/netstandard1.3/System.IO.FileSystem.Primitives.dll": {} - } - }, - "System.Linq/4.1.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Runtime": "4.1.0" - } - }, - "System.Linq.Expressions/4.1.0": { - "dependencies": { - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Net.Http/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.Win32.Primitives": "4.0.1", - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Diagnostics.DiagnosticSource": "4.0.0", - "System.Diagnostics.Tracing": "4.1.0", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.IO.Compression": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Runtime.InteropServices": "4.1.0", - "System.Security.Cryptography.X509Certificates": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Net.NetworkInformation/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.Win32.Primitives": "4.0.1", - "System.Collections": "4.0.11", - "System.Diagnostics.Tracing": "4.1.0", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.IO.FileSystem": "4.0.1", - "System.IO.FileSystem.Primitives": "4.0.1", - "System.Linq": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Net.Sockets": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Runtime.InteropServices": "4.1.0", - "System.Security.Principal.Windows": "4.0.0", - "System.Threading": "4.0.11", - "System.Threading.Overlapped": "4.0.1", - "System.Threading.Tasks": "4.0.11", - "System.Threading.Thread": "4.0.0", - "System.Threading.ThreadPool": "4.0.10", - "runtime.native.System": "4.0.0" - } - }, - "System.Net.Primitives/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Handles": "4.0.1" - } - }, - "System.Net.Security/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.Win32.Primitives": "4.0.1", - "System.Collections": "4.0.11", - "System.Collections.Concurrent": "4.0.12", - "System.Diagnostics.Tracing": "4.1.0", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Runtime.InteropServices": "4.1.0", - "System.Security.Claims": "4.0.1", - "System.Security.Cryptography.Primitives": "4.0.0", - "System.Security.Cryptography.X509Certificates": "4.1.0", - "System.Security.Principal": "4.0.1", - "System.Security.Principal.Windows": "4.0.0", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11", - "System.Threading.ThreadPool": "4.0.10" - } - }, - "System.Net.Sockets/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - 
"Microsoft.NETCore.Targets": "1.0.1", - "System.IO": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.ObjectModel/4.0.12": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Threading": "4.0.11" - }, - "runtime": { - "lib/netstandard1.3/System.ObjectModel.dll": {} - } - }, - "System.Reflection/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.IO": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Emit/4.0.1": { - "dependencies": { - "System.IO": "4.1.0", - "System.Reflection": "4.1.0", - "System.Reflection.Emit.ILGeneration": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - }, - "runtime": { - "lib/netstandard1.3/System.Reflection.Emit.dll": {} - } - }, - "System.Reflection.Emit.ILGeneration/4.0.1": { - "dependencies": { - "System.Reflection": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - }, - "runtime": { - "lib/netstandard1.3/System.Reflection.Emit.ILGeneration.dll": {} - } - }, - "System.Reflection.Extensions/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Primitives/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.TypeExtensions/4.1.0": { - "dependencies": { - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Resources.ResourceManager/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Globalization": "4.0.11", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "System.Runtime.Extensions/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.Handles/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.InteropServices/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Handles": "4.0.1" - } - }, - "System.Runtime.InteropServices.RuntimeInformation/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Reflection": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Threading": "4.0.11", - "runtime.native.System": "4.0.0" - } - }, - "System.Runtime.Numerics/4.0.1": { - "dependencies": { - "System.Globalization": "4.0.11", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0" - }, - "runtime": { - "lib/netstandard1.3/System.Runtime.Numerics.dll": {} - } - }, - 
"System.Runtime.Serialization.Primitives/4.1.1": { - "dependencies": { - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0" - }, - "runtime": { - "lib/netstandard1.3/System.Runtime.Serialization.Primitives.dll": {} - } - }, - "System.Security.Claims/4.0.1": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Security.Principal": "4.0.1" - }, - "runtime": { - "lib/netstandard1.3/System.Security.Claims.dll": {} - } - }, - "System.Security.Cryptography.Algorithms/4.2.0": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Security.Cryptography.Primitives": "4.0.0" - } - }, - "System.Security.Cryptography.Encoding/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Collections": "4.0.11", - "System.Collections.Concurrent": "4.0.12", - "System.Linq": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Runtime.InteropServices": "4.1.0", - "System.Security.Cryptography.Primitives": "4.0.0", - "System.Text.Encoding": "4.0.11", - "runtime.native.System.Security.Cryptography": "4.0.0" - } - }, - "System.Security.Cryptography.Primitives/4.0.0": { - "dependencies": { - "System.Diagnostics.Debug": "4.0.11", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11" - }, - "runtime": { - "lib/netstandard1.3/System.Security.Cryptography.Primitives.dll": {} - } - }, - "System.Security.Cryptography.X509Certificates/4.1.0": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Security.Cryptography.Algorithms": "4.2.0", - "System.Security.Cryptography.Encoding": "4.0.0" - } - }, - "System.Security.Principal/4.0.1": { - "dependencies": { - "System.Runtime": "4.1.0" - }, - "runtime": { - "lib/netstandard1.0/System.Security.Principal.dll": {} - } - }, - "System.Security.Principal.Windows/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.Win32.Primitives": "4.0.1", - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Reflection": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.Handles": "4.0.1", - "System.Runtime.InteropServices": "4.1.0", - "System.Security.Claims": "4.0.1", - "System.Security.Principal": "4.0.1", - "System.Text.Encoding": "4.0.11", - "System.Threading": "4.0.11" - } - }, - "System.Text.Encoding/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Text.Encoding.Extensions/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11" - } - }, - "System.Text.RegularExpressions/4.1.0": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Threading/4.0.11": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - }, - "runtime": { - "lib/netstandard1.3/System.Threading.dll": {} - } - }, - 
"System.Threading.Overlapped/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Handles": "4.0.1" - } - }, - "System.Threading.Tasks/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Threading.Tasks.Extensions/4.0.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - }, - "runtime": { - "lib/netstandard1.0/System.Threading.Tasks.Extensions.dll": {} - } - }, - "System.Threading.Thread/4.0.0": { - "dependencies": { - "System.Runtime": "4.1.0" - }, - "runtime": { - "lib/netstandard1.3/System.Threading.Thread.dll": {} - } - }, - "System.Threading.ThreadPool/4.0.10": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Runtime.Handles": "4.0.1" - }, - "runtime": { - "lib/netstandard1.3/System.Threading.ThreadPool.dll": {} - } - }, - "System.Threading.Timer/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Xml.ReaderWriter/4.0.11": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.IO.FileSystem": "4.0.1", - "System.IO.FileSystem.Primitives": "4.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Text.Encoding.Extensions": "4.0.11", - "System.Text.RegularExpressions": "4.1.0", - "System.Threading.Tasks": "4.0.11", - "System.Threading.Tasks.Extensions": "4.0.0" - }, - "runtime": { - "lib/netstandard1.3/System.Xml.ReaderWriter.dll": {} - } - }, - "System.Xml.XDocument/4.0.11": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Diagnostics.Debug": "4.0.11", - "System.Diagnostics.Tools": "4.0.1", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.Reflection": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading": "4.0.11", - "System.Xml.ReaderWriter": "4.0.11" - }, - "runtime": { - "lib/netstandard1.3/System.Xml.XDocument.dll": {} - } - }, - "ServiceStack.Interfaces/1.0.0": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Runtime.Serialization.Primitives": "4.1.1", - "NETStandard.Library": "1.6.0" - }, - "runtime": { - "ServiceStack.Interfaces.dll": {} - } - }, - "ServiceStack.Text/1.0.0": { - "dependencies": { - "NETStandard.Library": "1.6.0" - }, - "runtime": { - "ServiceStack.Text.dll": {} - } - } - } - }, - "libraries": { - "ServiceStack.Common/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": "" - }, - "Microsoft.NETCore.Platforms/1.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-AuiI4ZxFqt9sDBiIbgT92TSzC25YOY8cA+2VpfHqyXcXpz9pp7Su//rPZmtGl7ovEE3AFFjRrgMuaqxDxq/p0g==" - }, - "Microsoft.NETCore.Targets/1.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-PPc2oYN8VMKhKkCwyOk94mtf63cutoBgwp9rP6PIoMh8w7gFnUWip/sl8+xtFYexDiPHHbXx+PX5ulZPH0BXaQ==" - }, - "Microsoft.Win32.Primitives/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-HEhHMYkqecL+ZU/XcKdmokzW6W0EO/QhyEhVtUi49MKTFNwxv5om3k7UKIWqAkpX9sTRPRKnz5dnLZJNXyYCww==" - }, - "NETStandard.Library/1.6.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-bNoEfVhCVbgLMYHLcB1gI6AsPiB7r5nDHFsCzCygNCUfFlSgOJL038iqSFhapXVV3V98h60lBfIQ0HHQqj8/8g==" - }, - "runtime.native.System/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-uxDsy7oPffaCxj52dV8rSzNFyvvwpFHHptYWo4ToGvd+MhfaLgM244uGnbzQ1t+hSbZJCkwQilWFMcfXxiIiVQ==" - }, - "runtime.native.System.IO.Compression/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-x0vRx5QowdfwfGwDF0rAg6vHM5EF9ezlW9OLMDtp3+upvQfh7FCkxsYuS3chYYj1JXKehVoTlNPHgWLRNOJEjg==" - }, - "runtime.native.System.Security.Cryptography/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-cvb500FdLAEET21S3gAt9Alj0H1w/jvKsvQBFzueGFZwd2u5AUM4m70dJrLEoUkEx7QUuojOpiuLg6MGODZ7lw==" - }, - "System.AppContext/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-IVhxeIrfcxofFwpM3CWhGnaz6Eg8iIzpXMi6Axl3itmzk19Hwr+FM7D9J9ycFE9enUiQarw6C4wJOrJP4nsKyQ==" - }, - "System.Buffers/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-5c/ZrUO4KsaD8So6pgRh54/E1NoPTmP55S/hHVPVc29kMOfCINKIl6wZMhKKo6f9ib4BIRH82d+4Re30t+itbg==" - }, - "System.Collections/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-VmHryoBwnRQx3fz6fPtRH2Xl7TC5GQJ5L2H1JIZZGeSMMtcDemBjTDrDzMJCnyN/XBb3nSOXHlQJhKhzU0VRqg==" - }, - "System.Collections.Concurrent/4.0.12": { - "type": "package", - "serviceable": true, - "sha512": "sha512-+Ok1+hnWZ5rRhwr6ty1iYCAVH8wXx1aIVSVhVSZcyzlJcG6YNfHxgGXtJi0fm6jh4PEcxpEWdh4lE3877P88Fg==" - }, - "System.ComponentModel/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-2GdGoekuDhVKkEVIEklefJj9XPN0tWfjWjafypx6uYwBjE1bkxvuFNq1z+ewjUIh3BXvsG6i9D0PeuPdD6hp5w==" - }, - "System.ComponentModel.Primitives/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-sc/7eVCdxPrp3ljpgTKVaQGUXiW05phNWvtv/m2kocXqrUQvTVWKou1Edas2aDjTThLPZOxPYIGNb/HN0QjURg==" - }, - "System.Console/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-KhlsqMNB501vVC5G9EaSh8lUL8XY0KyDnqZC6JS0V22fGqDCnLYyYbmlqAOyELLHXOZhN+eceRtY60EHW6pmxQ==" - }, - "System.Data.Common/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-epU8jeTe7aE7RqGHq9rZ8b0Q4Ah7DgubzHQblgZMSqgW1saW868WmooSyC5ywf8upLBkcVLDu93W9GPWUYsU2Q==" - }, - "System.Diagnostics.Debug/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-S62MHt4uEhX1CQ49ymUXiRx78eRB/JosH/o62tU0CBnrQaJ9UN3Qg1wiVSOqd9xfgzB981Si1kBokaJhW0N+5g==" - }, - "System.Diagnostics.DiagnosticSource/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-PFKMhx+2y2pjnUEb1Wm5rkSodgrriQFMnSc1RDxk2UaQaRZqpie9SCLRLHVa7edWFfCTNWUfxSDAnT6lhPzVgw==" - }, - "System.Diagnostics.Tools/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-XQ6KjwwkGIQqPkDi9ru320X5K3scsiBRCDwYVKBvYKUkHOnnjp5dlqLRfLZ7OaCPwql/3EiOXZemZ4nRsC3qrg==" - }, - "System.Diagnostics.Tracing/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-SgYmK0myHMZD5hXDPRroeviesDAvFpZ6SItj+mzPEdyA/NX8PoEXBx1BvpQsGaxeK2pOQ5IBBl4xNM4I88ufTQ==" - }, - "System.Dynamic.Runtime/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-FKaQnGGKOqMRr8BpVlqv45X8IR6LPD6sExuyeZnXHm1JsWz4WAvPO65rqNHi1Q4jCJHWqrKoLm3RtJx1hKaZyg==" - }, - "System.Globalization/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-MuHuQIxNFC3ndnLcdZkKqBx0wiVbtrUaZ+z7R+stGWdj628/F4bX0XAKj/vT4aV1joBVQDQQig6Of76dBE3r/Q==" - }, - "System.Globalization.Calendars/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-5+ULfLHEH8xKLOOp7Yiw1PUCNnTloluT/TV6fkCLHlQdRRZHHqs/4m4Uwd6qXJz1t2FoSQO/TCfqy9rhna+LEQ==" - }, - "System.IO/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-OtfvEnXecJm1DHz5gVwXg0az/+PgihpYEL7vPrpEI6XtSNndHybSODLjXt4rx7RSHuN6vm7DBL0+rUe6LJizsg==" - }, - "System.IO.Compression/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-zOVmLCWb3aVDcBP2Oelu8mCS1dCoTaSFOpQbQpijuPeQxQPm6fTLEdwVULLvLVn/cYCS8ywiWEMh4nrO04kWQA==" - }, - "System.IO.Compression.ZipFile/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-XznuguYXLXRfgVND3WL9+GBAKTrAm/uyIzFSGemqnAFnX9pDj6ZwRAb7FXZ4hGiYa7AcKVLOgL56hbsMFv/c/w==" - }, - "System.IO.FileSystem/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-puF7uw4QFE7+iPqfapdS8c9w1yOjiwgyr3/juyfZ7RbkuSLYTzOSAZdW7DVE4+5Kf+FsMy0GXxXwcUsTD/OSTQ==" - }, - "System.IO.FileSystem.Primitives/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-CThOJLBPeVCe75dKKyzNiJfEM8XZFOHqAgIII/SSw1OfRHyGDhamBh8/HZuIp0aE8TTWu3wCoMZ2mSak4jjyqw==" - }, - "System.Linq/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-q0/YgSzkxZo1atYrpJeRWnp/s3kmHkZLHhtLbqhQhXA/FHu3yhSrtaSYNEg3eH/HehNgdGq9XdXEtS23LTIcZw==" - }, - "System.Linq.Expressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-/wBF/fAkXw93UwlLgQso99lk5MQQIXTIk0nvQTiToGchSqs0FwVhH/OmG9X0Zfm7LDeCXXtPKOVJ0tUXEc7Rhw==" - }, - "System.Net.Http/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-70DAk8tjoKYgYrV/3UArC7xo36hzK/Jaysxhi06SbC4R4aDU8wo7Y0ZrGOtyeRBedAqDaJSJ470JaCdId2TK+g==" - }, - "System.Net.NetworkInformation/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-Q0rfeiW6QsiZuicGjrFA7cRr2+kXex0JIljTTxzI09GIftB8k+aNL31VsQD1sI2g31cw7UGDTgozA/FgeNSzsQ==" - }, - "System.Net.Primitives/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-tl/51pHaQVIMdpeuDx+s5IzfxluITA9ZWcBESpukkM70qKnMFRHcDae20oqW9rruJd60R56iuKjLn/ftua/UxQ==" - }, - "System.Net.Security/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-NtWq5sp36hLdAEPlHq/NQaOtYEywSReOLg5P/SI9Q1wRuGIZhyBXuRni1rFQwoDLxa0qZfbsbApYck8Nadl3wA==" - }, - "System.Net.Sockets/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-6N0p+1lotT7wlLRtZukRmIL7r08xKU2phSAWMWE9LkY4AW7CmPIiB5eJnJPo/Q2t4p5480kQmMzwSV1MWvAAFQ==" - }, - "System.ObjectModel/4.0.12": { - "type": "package", - "serviceable": true, - "sha512": "sha512-quRSAXHHbWah/5QPbkmZP4SsAvBi+SO5OVBzCK+cfeBvww1QqKpNXs8SWrCllPO0yC8cgPKoByLZix2+WajAkw==" - }, - "System.Reflection/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-BHn7GJpu7Wvh2Vh8CQuPIcxkm0tHtdc18VJ65jETI0tyUJlDjmoj5FYk5owpwJK2ecHd1LFexMDjtci2E+fj9g==" - }, - "System.Reflection.Emit/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-kCzOR9me59S4BQLtQQOEzVDDw8LwRUw72oXOQCzpvvGT5p+THbuz1H3R9PQuU0ghBwXLQNIjrIX5VwZU1fNU2w==" - }, - "System.Reflection.Emit.ILGeneration/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-+9WzFJYZFrm93xXaqAfpwNjfPdpGi7GerHmpXR/uBiKyWDMi4DOXet7DE2rrm5gH3vcZUPuafz58g8WyiZ/rrQ==" - }, - "System.Reflection.Extensions/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-UxdaA7ySJOv/1+7ZDKX6azEaKqq7PcqyByf4pqrM+CCI0Kf4qe6Jy7+Z9/yYp2n/UhfwKNHN6vdrhafQdyywIQ==" - }, - "System.Reflection.Primitives/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-VHYeBsjjapUidsdcqf4Pb2Q8Dp51z6JPE5ACoQMM/XzxAkf0NjxVuG3b/WRmx08bX5dSErlXpp7+PIm7pXFVTg==" - }, - "System.Reflection.TypeExtensions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-9CVnGOKIVso/hHkJGqMKw79Ejc9VCpi1NVRKF+CaBAbhrKZix52fCDU4uEdFQl3NoNY+zKctBMF1EhT1FHN2pQ==" - }, - "System.Resources.ResourceManager/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-KIRQz3lLz1RboyW9S7+KnwxKJz0WCL7AgD6dDAI1haKOPKpYrlPErQsWxuxDU+G4TVBXrH0bj03THc9JEXiq3Q==" - }, - "System.Runtime/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-d5JHGNNxNcvc0eGihw9SxQeIMO0Jj3zXwG735XwZu7uDNQP9aIHu5XaxqxBlN00LUpildp7rTRY4/ZiK7ZAHNw==" - }, - "System.Runtime.Extensions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-nQRf6j90Q46wMHIjFv1GNLFLUlGHzI0tZHOpPLuZyIpHwBP8UFZyWWmHT6Keuoj0YF8nqtCb6F1GrGD5v54bMw==" - }, - "System.Runtime.Handles/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-lYelxB9lbJHmFaMLyFKl/6bP5V9WR1ZQFsVILbqmJSXLwJ8FEzcm1SSTKIwkrfB9EoOGQnNeQHv8M1TgzSyuew==" - }, - "System.Runtime.InteropServices/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-sGBGGySub1wdRddl8qFddrNMPYfOv/c79TrREDnjMFvBWzpPubcHozndOoHJpD/4tOyi4dmzuRckAyskjdjgEA==" - }, - "System.Runtime.InteropServices.RuntimeInformation/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-h+udLu2uLKD4y10y/+Uk6Vo55hcXh0scA1vsnXcmtI7NdeoMY1Wm+jFxbCFLB8IjD60Eq4bm3mFY6NjQEuZ2Rg==" - }, - "System.Runtime.Numerics/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-WtjbB9oVJnc0O3+xjloU7TZjmO3X41jF4ypeuIlvlEJxf1t9wYij89bOIF+0wzz6FO6x7420CFdnDOmyPCRf5A==" - }, - "System.Runtime.Serialization.Primitives/4.1.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-HZ6Du5QrTG8MNJbf4e4qMO3JRAkIboGT5Fk804uZtg3Gq516S7hAqTm2UZKUHa7/6HUGdVy3AqMQKbns06G/cg==" - }, - "System.Security.Claims/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-gHzJvT+M1M+wMQSbWyCSAyZUaG2xxEB1VaWsY9ZffdqCnzDj6zCJDVJaz+uYjloB7Y196YjjD4pkRGZTvbmK/A==" - }, - "System.Security.Cryptography.Algorithms/4.2.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-QqLxIiWAUuCAwlKj91J4pWDTSLfC+p2kSvZp7k3alcAkuGltw719qwkytD1reIR8v3PVS0gPezzJF5c+FAPOpg==" - }, - "System.Security.Cryptography.Encoding/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-25HS8M8wA+DfYDwfmynZV/t581Py9CLvR2/WffpebnAapCKvUhTC49f5KX8JvWxm5UMo04IaKIGQXzDdCk/+tw==" - }, - "System.Security.Cryptography.Primitives/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-y4C1QTUFQVQ6eZXSWNg2l+ivGtyMAnDl44z+VR/UXr5vlj/6WC0wlzcDrT0elUNiLrgAy+cgK/oxRdc3tDQRpQ==" - }, - "System.Security.Cryptography.X509Certificates/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-WIqNGIeKe7iN0jba3+M7Jho/HIfpY5/nnMKSKDzhj2K0b/JPkC9HdGRuttynY1gNLSuxXrhoDOiVIaHdk8JkFQ==" - }, - "System.Security.Principal/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-0yZbeYcnYsLegPGMwq3V3EuG4v9syM3VBDIK6H2oNQMtLiS3KuINq8B9sFqdOiVbfperjrNM2bzd6V9U0v6u0g==" - }, - "System.Security.Principal.Windows/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-Dr+guotMgg8Jqt3uivhnKcGIyHIoI/Naju2SrB/e/oURyrfD1QQjx124B/XLFD4Bd3aNfbtDwJ9Rd+JsB2iCYQ==" - }, - "System.Text.Encoding/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-hzeM2B7LYy/HkddegftSFlMD8tqgPpPw+fkw/bhdnMaEPfC+c7+B+VnJzjwILccK158SOJHvKQA84MoTdVzz3w==" - }, - "System.Text.Encoding.Extensions/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-9EV5G8HSyslKwsO2sNXgOUnRA+Sai5bV7m4WkGOiKNDBzKit2dXQ8nOD34pbt4AMTY33G3LP+JLCMERjp6HSVg==" - }, - "System.Text.RegularExpressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-CVWIpxR0TE1uCQmpR30VZMRXPXWlAN/1lxGMdBYUBwG9YJIJAFLgp3N5gmt0Y1gp/ZfTilYxwL6KZv4sH81WUQ==" - }, - "System.Threading/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-XM9KUm5ereNsrSTDxx8qUNyYHCIcSt5trkFY1wXQg+I50DYn+c2DCvhF1gVHEs3lMhBKEvBcIHkDq9HeOHye3w==" - }, - "System.Threading.Overlapped/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-dJiDoaCPWXPQ8E2ehE30HmwGof0NDpkNkSE306bcrlv9tyqKl1z/uip+Xv6Ni5xL50KrPqykHfzsMNSQ+uKUjw==" - }, - "System.Threading.Tasks/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-JALRGVfMPNCC2cgQuPPvzH4a0o0yunXmbSlt7Tt2EcRKiYTjdJzJOPTlJnybGXxN0Y1MsgZuxaL8UQKSHvwB8g==" - }, - "System.Threading.Tasks.Extensions/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-kDm7lM3YeKjo9S/6BVzsBp7R5jKGrwaow5NP60miUTjIzbF8O4pw2qwQjy+NLjbDJA0a2hAOUN10Juzb58zHjQ==" - }, - "System.Threading.Thread/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-vHoKsXmDPtrJ6lm1ZzRTYQhs6TUvriQuDhktiCmmXh//82LsS5e557p7W82MPB1NRfjv7ppBI/Jm65eGuz2jNg==" - }, - "System.Threading.ThreadPool/4.0.10": { - "type": "package", - "serviceable": true, - "sha512": "sha512-pzSjm3uhY7c91VqbbtObtrC5fXIkzof33JwioRuIfQuJgzxokBG4DrQ+oqEVOe2kEY6FCYAey+dT/JtGkM9bLQ==" - }, - "System.Threading.Timer/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-yY3M13wdPWlE2ZcwTcJJnH+8LyKMnI4iaF/587vnBhHsMUbKSWL0j40lGe3hP3U90SIFiRn6s54CPdQWHrnJBA==" - }, - "System.Xml.ReaderWriter/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-K1kvZs9CSLy5IOuKdocX9Ktmk1i1QA54h0pS5fIveQW4sRBqTBVVUkRDlp/VTeCRz3f8+sCVxspomiatflIxRg==" - }, - "System.Xml.XDocument/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-ydVR67d6gEeCt6mpntDAxmdrYkujFLlu4m5pEcqZVy8RtpTHOADxVNT3AxgcTScK3y4WdIJxhROgO8svlexBQw==" - }, - "ServiceStack.Interfaces/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": "" - }, - "ServiceStack.Text/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": "" - } - } -} \ No newline at end of file diff --git a/lib/netcore/ServiceStack.Common.dll b/lib/netcore/ServiceStack.Common.dll deleted file mode 100644 index 758bfc02..00000000 Binary files a/lib/netcore/ServiceStack.Common.dll and /dev/null differ diff --git a/lib/netcore/ServiceStack.Common.pdb b/lib/netcore/ServiceStack.Common.pdb deleted file mode 100644 index bdca4de6..00000000 Binary files a/lib/netcore/ServiceStack.Common.pdb and /dev/null differ diff --git a/lib/netcore/ServiceStack.Interfaces.deps.json b/lib/netcore/ServiceStack.Interfaces.deps.json deleted file mode 100644 index 90d470bc..00000000 --- a/lib/netcore/ServiceStack.Interfaces.deps.json +++ /dev/null @@ -1,455 +0,0 @@ -{ - "runtimeTarget": { - "name": ".NETStandard,Version=v1.1/", - "signature": "87332d22ff0b2d9487498e551d9b7082859e5b9c" - }, - "compilationOptions": {}, - "targets": { - 
".NETStandard,Version=v1.1": {}, - ".NETStandard,Version=v1.1/": { - "ServiceStack.Interfaces/1.0.0": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Runtime.Serialization.Primitives": "4.1.1", - "NETStandard.Library": "1.6.0" - }, - "runtime": { - "ServiceStack.Interfaces.dll": {} - } - }, - "Microsoft.NETCore.Platforms/1.0.1": {}, - "Microsoft.NETCore.Targets/1.0.1": {}, - "NETStandard.Library/1.6.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Collections": "4.0.11", - "System.Collections.Concurrent": "4.0.12", - "System.Diagnostics.Debug": "4.0.11", - "System.Diagnostics.Tools": "4.0.1", - "System.Diagnostics.Tracing": "4.1.0", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.IO.Compression": "4.1.0", - "System.Linq": "4.1.0", - "System.Linq.Expressions": "4.1.0", - "System.Net.Http": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Reflection.Extensions": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Runtime.InteropServices.RuntimeInformation": "4.0.0", - "System.Runtime.Numerics": "4.0.1", - "System.Text.Encoding": "4.0.11", - "System.Text.Encoding.Extensions": "4.0.11", - "System.Text.RegularExpressions": "4.1.0", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11", - "System.Xml.ReaderWriter": "4.0.11", - "System.Xml.XDocument": "4.0.11" - } - }, - "runtime.native.System/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "System.Collections/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Collections.Concurrent/4.0.12": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Diagnostics.Debug/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Tools/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Tracing/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Globalization/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.IO/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.IO.Compression/4.1.0": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11" - } - }, - "System.Linq/4.1.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Runtime": "4.1.0" - } - }, - "System.Linq.Expressions/4.1.0": { - "dependencies": { - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Net.Http/4.1.0": { - "dependencies": { - "System.IO": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Runtime": "4.1.0", - 
"System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Net.Primitives/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.ObjectModel/4.0.12": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Reflection/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.IO": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Extensions/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Primitives/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Resources.ResourceManager/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Globalization": "4.0.11", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "System.Runtime.Extensions/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.InteropServices/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.InteropServices.RuntimeInformation/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Reflection": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Threading": "4.0.11", - "runtime.native.System": "4.0.0" - } - }, - "System.Runtime.Numerics/4.0.1": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.Serialization.Primitives/4.1.1": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Text.Encoding/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Text.Encoding.Extensions/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11" - } - }, - "System.Text.RegularExpressions/4.1.0": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Threading/4.0.11": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Threading.Tasks/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Xml.ReaderWriter/4.0.11": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Xml.XDocument/4.0.11": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Xml.ReaderWriter": "4.0.11" - } - } - } - }, - "libraries": { - 
"ServiceStack.Interfaces/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": "" - }, - "Microsoft.NETCore.Platforms/1.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-AuiI4ZxFqt9sDBiIbgT92TSzC25YOY8cA+2VpfHqyXcXpz9pp7Su//rPZmtGl7ovEE3AFFjRrgMuaqxDxq/p0g==" - }, - "Microsoft.NETCore.Targets/1.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-PPc2oYN8VMKhKkCwyOk94mtf63cutoBgwp9rP6PIoMh8w7gFnUWip/sl8+xtFYexDiPHHbXx+PX5ulZPH0BXaQ==" - }, - "NETStandard.Library/1.6.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-bNoEfVhCVbgLMYHLcB1gI6AsPiB7r5nDHFsCzCygNCUfFlSgOJL038iqSFhapXVV3V98h60lBfIQ0HHQqj8/8g==" - }, - "runtime.native.System/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-uxDsy7oPffaCxj52dV8rSzNFyvvwpFHHptYWo4ToGvd+MhfaLgM244uGnbzQ1t+hSbZJCkwQilWFMcfXxiIiVQ==" - }, - "System.Collections/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-VmHryoBwnRQx3fz6fPtRH2Xl7TC5GQJ5L2H1JIZZGeSMMtcDemBjTDrDzMJCnyN/XBb3nSOXHlQJhKhzU0VRqg==" - }, - "System.Collections.Concurrent/4.0.12": { - "type": "package", - "serviceable": true, - "sha512": "sha512-+Ok1+hnWZ5rRhwr6ty1iYCAVH8wXx1aIVSVhVSZcyzlJcG6YNfHxgGXtJi0fm6jh4PEcxpEWdh4lE3877P88Fg==" - }, - "System.Diagnostics.Debug/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-S62MHt4uEhX1CQ49ymUXiRx78eRB/JosH/o62tU0CBnrQaJ9UN3Qg1wiVSOqd9xfgzB981Si1kBokaJhW0N+5g==" - }, - "System.Diagnostics.Tools/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-XQ6KjwwkGIQqPkDi9ru320X5K3scsiBRCDwYVKBvYKUkHOnnjp5dlqLRfLZ7OaCPwql/3EiOXZemZ4nRsC3qrg==" - }, - "System.Diagnostics.Tracing/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-SgYmK0myHMZD5hXDPRroeviesDAvFpZ6SItj+mzPEdyA/NX8PoEXBx1BvpQsGaxeK2pOQ5IBBl4xNM4I88ufTQ==" - }, - "System.Globalization/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-MuHuQIxNFC3ndnLcdZkKqBx0wiVbtrUaZ+z7R+stGWdj628/F4bX0XAKj/vT4aV1joBVQDQQig6Of76dBE3r/Q==" - }, - "System.IO/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-OtfvEnXecJm1DHz5gVwXg0az/+PgihpYEL7vPrpEI6XtSNndHybSODLjXt4rx7RSHuN6vm7DBL0+rUe6LJizsg==" - }, - "System.IO.Compression/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-zOVmLCWb3aVDcBP2Oelu8mCS1dCoTaSFOpQbQpijuPeQxQPm6fTLEdwVULLvLVn/cYCS8ywiWEMh4nrO04kWQA==" - }, - "System.Linq/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-q0/YgSzkxZo1atYrpJeRWnp/s3kmHkZLHhtLbqhQhXA/FHu3yhSrtaSYNEg3eH/HehNgdGq9XdXEtS23LTIcZw==" - }, - "System.Linq.Expressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-/wBF/fAkXw93UwlLgQso99lk5MQQIXTIk0nvQTiToGchSqs0FwVhH/OmG9X0Zfm7LDeCXXtPKOVJ0tUXEc7Rhw==" - }, - "System.Net.Http/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-70DAk8tjoKYgYrV/3UArC7xo36hzK/Jaysxhi06SbC4R4aDU8wo7Y0ZrGOtyeRBedAqDaJSJ470JaCdId2TK+g==" - }, - "System.Net.Primitives/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-tl/51pHaQVIMdpeuDx+s5IzfxluITA9ZWcBESpukkM70qKnMFRHcDae20oqW9rruJd60R56iuKjLn/ftua/UxQ==" - }, - "System.ObjectModel/4.0.12": { - "type": "package", - "serviceable": true, - "sha512": "sha512-quRSAXHHbWah/5QPbkmZP4SsAvBi+SO5OVBzCK+cfeBvww1QqKpNXs8SWrCllPO0yC8cgPKoByLZix2+WajAkw==" - }, - "System.Reflection/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-BHn7GJpu7Wvh2Vh8CQuPIcxkm0tHtdc18VJ65jETI0tyUJlDjmoj5FYk5owpwJK2ecHd1LFexMDjtci2E+fj9g==" - }, - "System.Reflection.Extensions/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-UxdaA7ySJOv/1+7ZDKX6azEaKqq7PcqyByf4pqrM+CCI0Kf4qe6Jy7+Z9/yYp2n/UhfwKNHN6vdrhafQdyywIQ==" - }, - "System.Reflection.Primitives/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-VHYeBsjjapUidsdcqf4Pb2Q8Dp51z6JPE5ACoQMM/XzxAkf0NjxVuG3b/WRmx08bX5dSErlXpp7+PIm7pXFVTg==" - }, - "System.Resources.ResourceManager/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-KIRQz3lLz1RboyW9S7+KnwxKJz0WCL7AgD6dDAI1haKOPKpYrlPErQsWxuxDU+G4TVBXrH0bj03THc9JEXiq3Q==" - }, - "System.Runtime/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-d5JHGNNxNcvc0eGihw9SxQeIMO0Jj3zXwG735XwZu7uDNQP9aIHu5XaxqxBlN00LUpildp7rTRY4/ZiK7ZAHNw==" - }, - "System.Runtime.Extensions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-nQRf6j90Q46wMHIjFv1GNLFLUlGHzI0tZHOpPLuZyIpHwBP8UFZyWWmHT6Keuoj0YF8nqtCb6F1GrGD5v54bMw==" - }, - "System.Runtime.InteropServices/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-sGBGGySub1wdRddl8qFddrNMPYfOv/c79TrREDnjMFvBWzpPubcHozndOoHJpD/4tOyi4dmzuRckAyskjdjgEA==" - }, - "System.Runtime.InteropServices.RuntimeInformation/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-h+udLu2uLKD4y10y/+Uk6Vo55hcXh0scA1vsnXcmtI7NdeoMY1Wm+jFxbCFLB8IjD60Eq4bm3mFY6NjQEuZ2Rg==" - }, - "System.Runtime.Numerics/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-WtjbB9oVJnc0O3+xjloU7TZjmO3X41jF4ypeuIlvlEJxf1t9wYij89bOIF+0wzz6FO6x7420CFdnDOmyPCRf5A==" - }, - "System.Runtime.Serialization.Primitives/4.1.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-HZ6Du5QrTG8MNJbf4e4qMO3JRAkIboGT5Fk804uZtg3Gq516S7hAqTm2UZKUHa7/6HUGdVy3AqMQKbns06G/cg==" - }, - "System.Text.Encoding/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-hzeM2B7LYy/HkddegftSFlMD8tqgPpPw+fkw/bhdnMaEPfC+c7+B+VnJzjwILccK158SOJHvKQA84MoTdVzz3w==" - }, - "System.Text.Encoding.Extensions/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-9EV5G8HSyslKwsO2sNXgOUnRA+Sai5bV7m4WkGOiKNDBzKit2dXQ8nOD34pbt4AMTY33G3LP+JLCMERjp6HSVg==" - }, - "System.Text.RegularExpressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-CVWIpxR0TE1uCQmpR30VZMRXPXWlAN/1lxGMdBYUBwG9YJIJAFLgp3N5gmt0Y1gp/ZfTilYxwL6KZv4sH81WUQ==" - }, - "System.Threading/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-XM9KUm5ereNsrSTDxx8qUNyYHCIcSt5trkFY1wXQg+I50DYn+c2DCvhF1gVHEs3lMhBKEvBcIHkDq9HeOHye3w==" - }, - "System.Threading.Tasks/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-JALRGVfMPNCC2cgQuPPvzH4a0o0yunXmbSlt7Tt2EcRKiYTjdJzJOPTlJnybGXxN0Y1MsgZuxaL8UQKSHvwB8g==" - }, - "System.Xml.ReaderWriter/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-K1kvZs9CSLy5IOuKdocX9Ktmk1i1QA54h0pS5fIveQW4sRBqTBVVUkRDlp/VTeCRz3f8+sCVxspomiatflIxRg==" - }, - "System.Xml.XDocument/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-ydVR67d6gEeCt6mpntDAxmdrYkujFLlu4m5pEcqZVy8RtpTHOADxVNT3AxgcTScK3y4WdIJxhROgO8svlexBQw==" - } - } -} \ No newline at end of file diff --git a/lib/netcore/ServiceStack.Interfaces.dll b/lib/netcore/ServiceStack.Interfaces.dll deleted file mode 100644 index df6c95df..00000000 Binary files a/lib/netcore/ServiceStack.Interfaces.dll and /dev/null 
differ diff --git a/lib/netcore/ServiceStack.Interfaces.pdb b/lib/netcore/ServiceStack.Interfaces.pdb deleted file mode 100644 index 962e7f3e..00000000 Binary files a/lib/netcore/ServiceStack.Interfaces.pdb and /dev/null differ diff --git a/lib/netcore/ServiceStack.Text.deps.json b/lib/netcore/ServiceStack.Text.deps.json deleted file mode 100644 index 029c1cca..00000000 --- a/lib/netcore/ServiceStack.Text.deps.json +++ /dev/null @@ -1,536 +0,0 @@ -{ - "runtimeTarget": { - "name": ".NETStandard,Version=v1.1/", - "signature": "7b7e0ae4297f8834538c98cdb8bf0e7073ea7069" - }, - "compilationOptions": {}, - "targets": { - ".NETStandard,Version=v1.1": {}, - ".NETStandard,Version=v1.1/": { - "ServiceStack.Text/1.0.0": { - "dependencies": { - "System.Runtime.Serialization.Primitives": "4.1.1", - "System.Runtime.Serialization.Xml": "4.1.1", - "System.Net.Requests": "4.0.11", - "System.Dynamic.Runtime": "4.0.11", - "Microsoft.CSharp": "4.0.1", - "System.Reflection.Emit": "4.0.1", - "NETStandard.Library": "1.6.0" - }, - "runtime": { - "ServiceStack.Text.dll": {} - } - }, - "Microsoft.CSharp/4.0.1": { - "dependencies": { - "System.Dynamic.Runtime": "4.0.11", - "System.Linq.Expressions": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "Microsoft.NETCore.Platforms/1.0.1": {}, - "Microsoft.NETCore.Targets/1.0.1": {}, - "NETStandard.Library/1.6.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Collections": "4.0.11", - "System.Collections.Concurrent": "4.0.12", - "System.Diagnostics.Debug": "4.0.11", - "System.Diagnostics.Tools": "4.0.1", - "System.Diagnostics.Tracing": "4.1.0", - "System.Globalization": "4.0.11", - "System.IO": "4.1.0", - "System.IO.Compression": "4.1.0", - "System.Linq": "4.1.0", - "System.Linq.Expressions": "4.1.0", - "System.Net.Http": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Reflection.Extensions": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.Extensions": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Runtime.InteropServices.RuntimeInformation": "4.0.0", - "System.Runtime.Numerics": "4.0.1", - "System.Text.Encoding": "4.0.11", - "System.Text.Encoding.Extensions": "4.0.11", - "System.Text.RegularExpressions": "4.1.0", - "System.Threading": "4.0.11", - "System.Threading.Tasks": "4.0.11", - "System.Xml.ReaderWriter": "4.0.11", - "System.Xml.XDocument": "4.0.11" - } - }, - "runtime.native.System/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "System.Collections/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Collections.Concurrent/4.0.12": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Diagnostics.Debug/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Tools/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Diagnostics.Tracing/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - 
"System.Dynamic.Runtime/4.0.11": { - "dependencies": { - "System.Linq.Expressions": "4.1.0", - "System.ObjectModel": "4.0.12", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Globalization/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.IO/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.IO.Compression/4.1.0": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11" - } - }, - "System.Linq/4.1.0": { - "dependencies": { - "System.Collections": "4.0.11", - "System.Runtime": "4.1.0" - } - }, - "System.Linq.Expressions/4.1.0": { - "dependencies": { - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Net.Http/4.1.0": { - "dependencies": { - "System.IO": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Net.Primitives/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Net.Requests/4.0.11": { - "dependencies": { - "System.IO": "4.1.0", - "System.Net.Primitives": "4.0.11", - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.ObjectModel/4.0.12": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Reflection/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.IO": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Emit/4.0.1": { - "dependencies": { - "System.IO": "4.1.0", - "System.Reflection": "4.1.0", - "System.Reflection.Emit.ILGeneration": "4.0.1", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Emit.ILGeneration/4.0.1": { - "dependencies": { - "System.Reflection": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Extensions/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Reflection.Primitives/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Resources.ResourceManager/4.0.1": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Globalization": "4.0.11", - "System.Reflection": "4.1.0", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1" - } - }, - "System.Runtime.Extensions/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.InteropServices/4.1.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Reflection": "4.1.0", - "System.Reflection.Primitives": "4.0.1", - "System.Runtime": "4.1.0" - } - 
}, - "System.Runtime.InteropServices.RuntimeInformation/4.0.0": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "System.Reflection": "4.1.0", - "System.Resources.ResourceManager": "4.0.1", - "System.Runtime": "4.1.0", - "System.Runtime.InteropServices": "4.1.0", - "System.Threading": "4.0.11", - "runtime.native.System": "4.0.0" - } - }, - "System.Runtime.Numerics/4.0.1": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.Serialization.Primitives/4.1.1": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Runtime.Serialization.Xml/4.1.1": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Xml.ReaderWriter": "4.0.11" - } - }, - "System.Text.Encoding/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Text.Encoding.Extensions/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11" - } - }, - "System.Text.RegularExpressions/4.1.0": { - "dependencies": { - "System.Runtime": "4.1.0" - } - }, - "System.Threading/4.0.11": { - "dependencies": { - "System.Runtime": "4.1.0", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Threading.Tasks/4.0.11": { - "dependencies": { - "Microsoft.NETCore.Platforms": "1.0.1", - "Microsoft.NETCore.Targets": "1.0.1", - "System.Runtime": "4.1.0" - } - }, - "System.Xml.ReaderWriter/4.0.11": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Text.Encoding": "4.0.11", - "System.Threading.Tasks": "4.0.11" - } - }, - "System.Xml.XDocument/4.0.11": { - "dependencies": { - "System.IO": "4.1.0", - "System.Runtime": "4.1.0", - "System.Xml.ReaderWriter": "4.0.11" - } - } - } - }, - "libraries": { - "ServiceStack.Text/1.0.0": { - "type": "project", - "serviceable": false, - "sha512": "" - }, - "Microsoft.CSharp/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-li1uN0vRls5leO1IhwokUOSC8yWA2ICpUGMxjxm4epzFPYnl2c/9jo1gl9TzpumH/rKIkmRUGpJ+YLWQ1XfIcQ==" - }, - "Microsoft.NETCore.Platforms/1.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-AuiI4ZxFqt9sDBiIbgT92TSzC25YOY8cA+2VpfHqyXcXpz9pp7Su//rPZmtGl7ovEE3AFFjRrgMuaqxDxq/p0g==" - }, - "Microsoft.NETCore.Targets/1.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-PPc2oYN8VMKhKkCwyOk94mtf63cutoBgwp9rP6PIoMh8w7gFnUWip/sl8+xtFYexDiPHHbXx+PX5ulZPH0BXaQ==" - }, - "NETStandard.Library/1.6.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-bNoEfVhCVbgLMYHLcB1gI6AsPiB7r5nDHFsCzCygNCUfFlSgOJL038iqSFhapXVV3V98h60lBfIQ0HHQqj8/8g==" - }, - "runtime.native.System/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-uxDsy7oPffaCxj52dV8rSzNFyvvwpFHHptYWo4ToGvd+MhfaLgM244uGnbzQ1t+hSbZJCkwQilWFMcfXxiIiVQ==" - }, - "System.Collections/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-VmHryoBwnRQx3fz6fPtRH2Xl7TC5GQJ5L2H1JIZZGeSMMtcDemBjTDrDzMJCnyN/XBb3nSOXHlQJhKhzU0VRqg==" - }, - "System.Collections.Concurrent/4.0.12": { - "type": "package", - "serviceable": true, - "sha512": "sha512-+Ok1+hnWZ5rRhwr6ty1iYCAVH8wXx1aIVSVhVSZcyzlJcG6YNfHxgGXtJi0fm6jh4PEcxpEWdh4lE3877P88Fg==" - }, - "System.Diagnostics.Debug/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-S62MHt4uEhX1CQ49ymUXiRx78eRB/JosH/o62tU0CBnrQaJ9UN3Qg1wiVSOqd9xfgzB981Si1kBokaJhW0N+5g==" - }, - "System.Diagnostics.Tools/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-XQ6KjwwkGIQqPkDi9ru320X5K3scsiBRCDwYVKBvYKUkHOnnjp5dlqLRfLZ7OaCPwql/3EiOXZemZ4nRsC3qrg==" - }, - "System.Diagnostics.Tracing/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-SgYmK0myHMZD5hXDPRroeviesDAvFpZ6SItj+mzPEdyA/NX8PoEXBx1BvpQsGaxeK2pOQ5IBBl4xNM4I88ufTQ==" - }, - "System.Dynamic.Runtime/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-FKaQnGGKOqMRr8BpVlqv45X8IR6LPD6sExuyeZnXHm1JsWz4WAvPO65rqNHi1Q4jCJHWqrKoLm3RtJx1hKaZyg==" - }, - "System.Globalization/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-MuHuQIxNFC3ndnLcdZkKqBx0wiVbtrUaZ+z7R+stGWdj628/F4bX0XAKj/vT4aV1joBVQDQQig6Of76dBE3r/Q==" - }, - "System.IO/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-OtfvEnXecJm1DHz5gVwXg0az/+PgihpYEL7vPrpEI6XtSNndHybSODLjXt4rx7RSHuN6vm7DBL0+rUe6LJizsg==" - }, - "System.IO.Compression/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-zOVmLCWb3aVDcBP2Oelu8mCS1dCoTaSFOpQbQpijuPeQxQPm6fTLEdwVULLvLVn/cYCS8ywiWEMh4nrO04kWQA==" - }, - "System.Linq/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-q0/YgSzkxZo1atYrpJeRWnp/s3kmHkZLHhtLbqhQhXA/FHu3yhSrtaSYNEg3eH/HehNgdGq9XdXEtS23LTIcZw==" - }, - "System.Linq.Expressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-/wBF/fAkXw93UwlLgQso99lk5MQQIXTIk0nvQTiToGchSqs0FwVhH/OmG9X0Zfm7LDeCXXtPKOVJ0tUXEc7Rhw==" - }, - "System.Net.Http/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-70DAk8tjoKYgYrV/3UArC7xo36hzK/Jaysxhi06SbC4R4aDU8wo7Y0ZrGOtyeRBedAqDaJSJ470JaCdId2TK+g==" - }, - "System.Net.Primitives/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-tl/51pHaQVIMdpeuDx+s5IzfxluITA9ZWcBESpukkM70qKnMFRHcDae20oqW9rruJd60R56iuKjLn/ftua/UxQ==" - }, - "System.Net.Requests/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-oa/P2creoBe+l4CQEUY2HJGFlAPJuDhZlldkPtQGrM1KIJXsdmSYwIqLarGILVyYWmDbkArDgrc1d5D5Om86lQ==" - }, - "System.ObjectModel/4.0.12": { - "type": "package", - "serviceable": true, - "sha512": "sha512-quRSAXHHbWah/5QPbkmZP4SsAvBi+SO5OVBzCK+cfeBvww1QqKpNXs8SWrCllPO0yC8cgPKoByLZix2+WajAkw==" - }, - "System.Reflection/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-BHn7GJpu7Wvh2Vh8CQuPIcxkm0tHtdc18VJ65jETI0tyUJlDjmoj5FYk5owpwJK2ecHd1LFexMDjtci2E+fj9g==" - }, - "System.Reflection.Emit/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-kCzOR9me59S4BQLtQQOEzVDDw8LwRUw72oXOQCzpvvGT5p+THbuz1H3R9PQuU0ghBwXLQNIjrIX5VwZU1fNU2w==" - }, - "System.Reflection.Emit.ILGeneration/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-+9WzFJYZFrm93xXaqAfpwNjfPdpGi7GerHmpXR/uBiKyWDMi4DOXet7DE2rrm5gH3vcZUPuafz58g8WyiZ/rrQ==" - }, - "System.Reflection.Extensions/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-UxdaA7ySJOv/1+7ZDKX6azEaKqq7PcqyByf4pqrM+CCI0Kf4qe6Jy7+Z9/yYp2n/UhfwKNHN6vdrhafQdyywIQ==" - }, - "System.Reflection.Primitives/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-VHYeBsjjapUidsdcqf4Pb2Q8Dp51z6JPE5ACoQMM/XzxAkf0NjxVuG3b/WRmx08bX5dSErlXpp7+PIm7pXFVTg==" - }, - "System.Resources.ResourceManager/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": 
"sha512-KIRQz3lLz1RboyW9S7+KnwxKJz0WCL7AgD6dDAI1haKOPKpYrlPErQsWxuxDU+G4TVBXrH0bj03THc9JEXiq3Q==" - }, - "System.Runtime/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-d5JHGNNxNcvc0eGihw9SxQeIMO0Jj3zXwG735XwZu7uDNQP9aIHu5XaxqxBlN00LUpildp7rTRY4/ZiK7ZAHNw==" - }, - "System.Runtime.Extensions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-nQRf6j90Q46wMHIjFv1GNLFLUlGHzI0tZHOpPLuZyIpHwBP8UFZyWWmHT6Keuoj0YF8nqtCb6F1GrGD5v54bMw==" - }, - "System.Runtime.InteropServices/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-sGBGGySub1wdRddl8qFddrNMPYfOv/c79TrREDnjMFvBWzpPubcHozndOoHJpD/4tOyi4dmzuRckAyskjdjgEA==" - }, - "System.Runtime.InteropServices.RuntimeInformation/4.0.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-h+udLu2uLKD4y10y/+Uk6Vo55hcXh0scA1vsnXcmtI7NdeoMY1Wm+jFxbCFLB8IjD60Eq4bm3mFY6NjQEuZ2Rg==" - }, - "System.Runtime.Numerics/4.0.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-WtjbB9oVJnc0O3+xjloU7TZjmO3X41jF4ypeuIlvlEJxf1t9wYij89bOIF+0wzz6FO6x7420CFdnDOmyPCRf5A==" - }, - "System.Runtime.Serialization.Primitives/4.1.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-HZ6Du5QrTG8MNJbf4e4qMO3JRAkIboGT5Fk804uZtg3Gq516S7hAqTm2UZKUHa7/6HUGdVy3AqMQKbns06G/cg==" - }, - "System.Runtime.Serialization.Xml/4.1.1": { - "type": "package", - "serviceable": true, - "sha512": "sha512-yqfKHkWUAdI0hdDIdD9KDzluKtZ8IIqLF3O7xIZlt6UTs1bOvFRpCvRTvGQva3Ak/ZM9/nq9IHBJ1tC4Ybcrjg==" - }, - "System.Text.Encoding/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-hzeM2B7LYy/HkddegftSFlMD8tqgPpPw+fkw/bhdnMaEPfC+c7+B+VnJzjwILccK158SOJHvKQA84MoTdVzz3w==" - }, - "System.Text.Encoding.Extensions/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-9EV5G8HSyslKwsO2sNXgOUnRA+Sai5bV7m4WkGOiKNDBzKit2dXQ8nOD34pbt4AMTY33G3LP+JLCMERjp6HSVg==" - }, - "System.Text.RegularExpressions/4.1.0": { - "type": "package", - "serviceable": true, - "sha512": "sha512-CVWIpxR0TE1uCQmpR30VZMRXPXWlAN/1lxGMdBYUBwG9YJIJAFLgp3N5gmt0Y1gp/ZfTilYxwL6KZv4sH81WUQ==" - }, - "System.Threading/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-XM9KUm5ereNsrSTDxx8qUNyYHCIcSt5trkFY1wXQg+I50DYn+c2DCvhF1gVHEs3lMhBKEvBcIHkDq9HeOHye3w==" - }, - "System.Threading.Tasks/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-JALRGVfMPNCC2cgQuPPvzH4a0o0yunXmbSlt7Tt2EcRKiYTjdJzJOPTlJnybGXxN0Y1MsgZuxaL8UQKSHvwB8g==" - }, - "System.Xml.ReaderWriter/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-K1kvZs9CSLy5IOuKdocX9Ktmk1i1QA54h0pS5fIveQW4sRBqTBVVUkRDlp/VTeCRz3f8+sCVxspomiatflIxRg==" - }, - "System.Xml.XDocument/4.0.11": { - "type": "package", - "serviceable": true, - "sha512": "sha512-ydVR67d6gEeCt6mpntDAxmdrYkujFLlu4m5pEcqZVy8RtpTHOADxVNT3AxgcTScK3y4WdIJxhROgO8svlexBQw==" - } - } -} \ No newline at end of file diff --git a/lib/netcore/ServiceStack.Text.dll b/lib/netcore/ServiceStack.Text.dll deleted file mode 100644 index 3b42b875..00000000 Binary files a/lib/netcore/ServiceStack.Text.dll and /dev/null differ diff --git a/lib/netcore/ServiceStack.Text.pdb b/lib/netcore/ServiceStack.Text.pdb deleted file mode 100644 index bf69eaf2..00000000 Binary files a/lib/netcore/ServiceStack.Text.pdb and /dev/null differ diff --git a/lib/netcore/copy.bat b/lib/netcore/copy.bat deleted file mode 100644 index ea87b3b3..00000000 --- a/lib/netcore/copy.bat +++ /dev/null @@ -1,7 +0,0 @@ -SET BUILD=Debug -REM SET BUILD=Release - 
-COPY ..\..\..\ServiceStack.Text\src\ServiceStack.Text\bin\%BUILD%\netstandard1.1\ServiceStack.Text.* .\ -COPY ..\..\..\ServiceStack\src\ServiceStack.Common\bin\%BUILD%\netstandard1.3\ServiceStack.Common.* .\ -COPY ..\..\..\ServiceStack\src\ServiceStack.Interfaces\bin\%BUILD%\netstandard1.1\ServiceStack.Interfaces.* .\ - diff --git a/lib/signed/ServiceStack.Common.dll b/lib/signed/ServiceStack.Common.dll deleted file mode 100644 index 098f9e12..00000000 Binary files a/lib/signed/ServiceStack.Common.dll and /dev/null differ diff --git a/lib/signed/ServiceStack.Text.XML b/lib/signed/ServiceStack.Text.XML deleted file mode 100644 index a0267c88..00000000 --- a/lib/signed/ServiceStack.Text.XML +++ /dev/null @@ -1,1400 +0,0 @@ - - - - ServiceStack.Text - - - - - Utils to load types - - - - - Find the type from the name supplied - - [typeName] or [typeName, assemblyName] - - - - - The top-most interface of the given type, if any. - - - - - Find type if it exists - - - - The type if it exists - - - - If AlwaysUseUtc is set to true then convert all DateTime to UTC. If PreserveUtc is set to true then UTC dates will not convert to local - - - - - - - Repairs an out-of-spec XML date/time string which incorrectly uses a space instead of a 'T' to separate the date from the time. - These string are occasionally generated by SQLite and can cause errors in OrmLite when reading these columns from the DB. - - The XML date/time string to repair - The repaired string. If no repairs were made, the original string is returned. - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - - Get the type(string) constructor if exists - - The type. - - - - - micro optimizations: using flags instead of value.IndexOfAny(EscapeChars) - - - - - - - Class to hold - - - - - - A fast, standards-based, serialization-issue free DateTime serailizer. - - - - - Determines whether this serializer can create the specified type from a string. - - The type. - - true if this instance [can create from string] the specified type; otherwise, false. - - - - - Parses the specified value. - - The value. - - - - - Deserializes from reader. - - The reader. - - - - - Serializes to string. - - The value. - - - - - Serializes to writer. - - The value. - The writer. - - - - Sets which format to use when serializing TimeSpans - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. - - - - - if the is configured - to support web-friendly serialized formats, ie emitting lowercase_underscore_casing for JSON - - - - - Define how property names are mapped during deserialization - - - - - Gets or sets a value indicating if the framework should throw serialization exceptions - or continue regardless of deserialization errors. If the framework - will throw; otherwise, it will parse as many fields as possible. The default is . - - - - - Gets or sets a value indicating if the framework should always convert to UTC format instead of local time. - - - - - Gets or sets a value indicating if the framework should skip automatic conversions. - Dates will be handled literally, any included timezone encoding will be lost and the date will be treaded as DateTimeKind.Local - Utc formatted input will result in DateTimeKind.Utc output. 
Any input without TZ data will be set DateTimeKind.Unspecified - This will take precedence over other flags like AlwaysUseUtc - JsConfig.DateHandler = DateHandler.ISO8601 should be used when set true for consistent de/serialization. - - - - - Gets or sets a value indicating if the framework should always assume is in UTC format if Kind is Unspecified. - - - - - Gets or sets whether we should append the Utc offset when we serialize Utc dates. Defaults to no. - Only supported for when the JsConfig.DateHandler == JsonDateHandler.TimestampOffset - - - - - Gets or sets a value indicating if unicode symbols should be serialized as "\uXXXX". - - - - - Gets or sets a value indicating if the framework should call an error handler when - an exception happens during the deserialization. - - Parameters have following meaning in order: deserialized entity, property name, parsed value, property type, caught exception. - - - - If set to true, Interface types will be prefered over concrete types when serializing. - - - - - If set to true, Interface types will be prefered over concrete types when serializing. - - - - - Sets the maximum depth to avoid circular dependencies - - - - - Set this to enable your own type construction provider. - This is helpful for integration with IoC containers where you need to call the container constructor. - Return null if you don't know how to construct the type and the parameterless constructor will be used. - - - - - Always emit type info for this type. Takes precedence over ExcludeTypeInfo - - - - - Never emit type info for this type - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. - - - - - Define custom serialization fn for BCL Structs - - - - - Opt-in flag to set some Value Types to be treated as a Ref Type - - - - - Whether there is a fn (raw or otherwise) - - - - - Define custom raw serialization fn - - - - - Define custom serialization hook - - - - - Define custom after serialization hook - - - - - Define custom deserialization fn for BCL Structs - - - - - Define custom raw deserialization fn for objects - - - - - Exclude specific properties of this type from being serialized - - - - - The property names on target types must match property names in the JSON source - - - - - The property names on target types may not match the property names in the JSON source - - - - - Uses the xsd format like PT15H10M20S - - - - - Uses the standard .net ToString method of the TimeSpan class - - - - - Get JSON string value converted to T - - - - - Get JSON string value - - - - - Get JSON string value - - - - - Get unescaped string value - - - - - Get unescaped string value - - - - - Write JSON Array, Object, bool or number values as raw string - - - - - Creates an instance of a Type from a string value - - - - - Parses the specified value. - - The value. - - - - - Shortcut escape when we're sure value doesn't contain any escaped chars - - - - - - - Given a character as utf32, returns the equivalent string provided that the character - is legal json. - - - - - - - Micro-optimization keep pre-built char arrays saving a .ToCharArray() + function call (see .net implementation of .Write(string)) - - - - - Searches the string for one or more non-printable characters. - - The string to search. - True if there are any characters that require escaping. False if the value can be written verbatim. 
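The JsConfig entries removed above document ServiceStack.Text's global serialization switches (DateHandler, AlwaysUseUtc, AssumeUtc, EscapeUnicode, ExcludeTypeInfo, ModelFactory, etc.). As a rough illustration only, and assuming the standard ServiceStack.Text API (property names can differ slightly between versions), this is how a consumer typically applies the ISO8601/UTC guidance from these docs:

```csharp
using System;
using ServiceStack.Text;

class JsConfigExample
{
    static void Main()
    {
        // Follow the removed docs' guidance: ISO8601 dates + UTC for consistent round-tripping.
        JsConfig.DateHandler = DateHandler.ISO8601;
        JsConfig.AlwaysUseUtc = true;
        JsConfig.ExcludeTypeInfo = true;   // never emit __type metadata

        var dto = new { Name = "example", CreatedAt = DateTime.UtcNow };
        Console.WriteLine(JsonSerializer.SerializeToString(dto));
    }
}
```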
- - Micro optimizations: since quote and backslash are the only printable characters requiring escaping, removed previous optimization - (using flags instead of value.IndexOfAny(EscapeChars)) in favor of two equality operations saving both memory and CPU time. - Also slightly reduced code size by re-arranging conditions. - TODO: Possible Linq-only solution requires profiling: return value.Any(c => !c.IsPrintable() || c == QuoteChar || c == EscapeChar); - - - - - Implement the serializer using a more static approach - - - - - - Implement the serializer using a more static approach - - - - - - Pretty Thread-Safe cache class from: - http://code.google.com/p/dapper-dot-net/source/browse/Dapper/SqlMapper.cs - - This is a micro-cache; suitable when the number of terms is controllable (a few hundred, for example), - and strictly append-only; you cannot change existing values. All key matches are on **REFERENCE** - equality. The type is fully thread-safe. - - - - - Represents an individual object, allowing access to members by-name - - - - - Get or Set the value of a named member for the underlying object - - - - - The object represented by this instance - - - - - Use the target types definition of equality - - - - - Obtain the hash of the target object - - - - - Use the target's definition of a string representation - - - - - Wraps an individual object, allowing by-name access to that instance - - - - - Provides by-name member-access to objects of a given type - - - - - Does this type support new instances via a parameterless constructor? - - - - - Create a new instance of this type - - - - - Provides a type-specific accessor, allowing by-name access for all objects of that type - - The accessor is cached internally; a pre-existing accessor may be returned - - - - Get or set the value of a named member on the target instance - - - - - Generic implementation of object pooling pattern with predefined pool size limit. The main - purpose is that limited number of frequently used objects can be kept in the pool for - further recycling. - - Notes: - 1) it is not the goal to keep all returned objects. Pool is not meant for storage. If there - is no space in the pool, extra returned objects will be dropped. - - 2) it is implied that if object was obtained from a pool, the caller will return it back in - a relatively short time. Keeping checked out objects for long durations is ok, but - reduces usefulness of pooling. Just new up your own. - - Not returning objects to the pool in not detrimental to the pool's work, but is a bad practice. - Rationale: - If there is no intent for reusing the object, do not use pool - just use "new". - - - - - Not using System.Func{T} because this file is linked into the (debugger) Formatter, - which does not have that type (since it compiles against .NET 2.0). - - - - - Produces an instance. - - - Search strategy is a simple linear probing which is chosen for it cache-friendliness. - Note that Free will try to store recycled objects close to the start thus statistically - reducing how far we will typically search. - - - - - Returns objects to the pool. - - - Search strategy is a simple linear probing which is chosen for it cache-friendliness. - Note that Free will try to store recycled objects close to the start thus statistically - reducing how far we will typically search in Allocate. - - - - - Removes an object from leak tracking. - - This is called when an object is returned to the pool. 
It may also be explicitly - called if an object allocated from the pool is intentionally not being returned - to the pool. This can be of use with pooled arrays if the consumer wants to - return a larger array to the pool than was originally allocated. - - - - - this is RAII object to automatically release pooled object when its owning pool - - - - - Shared object pool for roslyn - - Use this shared pool if only concern is reducing object allocations. - if perf of an object pool itself is also a concern, use ObjectPool directly. - - For example, if you want to create a million of small objects within a second, - use the ObjectPool directly. it should have much less overhead than using this. - - - - - pool that uses default constructor with 100 elements pooled - - - - - pool that uses default constructor with 20 elements pooled - - - - - pool that uses string as key with StringComparer.OrdinalIgnoreCase as key comparer - - - - - pool that uses string as element with StringComparer.OrdinalIgnoreCase as element comparer - - - - - pool that uses string as element with StringComparer.Ordinal as element comparer - - - - - Used to reduce the # of temporary byte[]s created to satisfy serialization and - other I/O requests - - - - pooled memory : 4K * 512 = 4MB - - - - Reusable StringBuilder ThreadStatic Cache - - - - - Alternative Reusable StringBuilder ThreadStatic Cache - - - - - Reusable StringWriter ThreadStatic Cache - - - - - Alternative Reusable StringWriter ThreadStatic Cache - - - - - Manages pools of RecyclableMemoryStream objects. - - - There are two pools managed in here. The small pool contains same-sized buffers that are handed to streams - as they write more data. - - For scenarios that need to call GetBuffer(), the large pool contains buffers of various sizes, all - multiples of LargeBufferMultiple (1 MB by default). They are split by size to avoid overly-wasteful buffer - usage. There should be far fewer 8 MB buffers than 1 MB buffers, for example. - - - - - Generic delegate for handling events without any arguments. - - - - - Delegate for handling large buffer discard reports. - - Reason the buffer was discarded. - - - - Delegate for handling reports of stream size when streams are allocated - - Bytes allocated. - - - - Delegate for handling periodic reporting of memory use statistics. - - Bytes currently in use in the small pool. - Bytes currently free in the small pool. - Bytes currently in use in the large pool. - Bytes currently free in the large pool. - - - - pools[0] = 1x largeBufferMultiple buffers - pools[1] = 2x largeBufferMultiple buffers - etc., up to maximumBufferSize - - - - - Initializes the memory manager with the default block/buffer specifications. - - - - - Initializes the memory manager with the given block requiredSize. - - Size of each block that is pooled. Must be > 0. - Each large buffer will be a multiple of this value. - Buffers larger than this are not pooled - blockSize is not a positive number, or largeBufferMultiple is not a positive number, or maximumBufferSize is less than blockSize. - maximumBufferSize is not a multiple of largeBufferMultiple - - - - The size of each block. It must be set at creation and cannot be changed. - - - - - All buffers are multiples of this number. It must be set at creation and cannot be changed. - - - - - Gets or sets the maximum buffer size. - - Any buffer that is returned to the pool that is larger than this will be - discarded and garbage collected. 
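The ObjectPool/SharedPools entries above describe a fixed-size pool that is probed linearly on Allocate/Free and simply drops returned objects when full. The type itself is internal to ServiceStack.Text, so the following is only a minimal, hypothetical sketch of that pattern (names and signatures are illustrative, not the library's API):

```csharp
using System;
using System.Threading;

// Illustrative only: a fixed slot array probed linearly, as the removed docs describe.
public sealed class SimpleObjectPool<T> where T : class
{
    private readonly T[] _slots;
    private readonly Func<T> _factory;

    public SimpleObjectPool(Func<T> factory, int size = 20)
    {
        _factory = factory;
        _slots = new T[size];
    }

    public T Allocate()
    {
        // Linear probe from the front; Free stores near the front, so hits are usually early.
        for (var i = 0; i < _slots.Length; i++)
        {
            var item = Interlocked.Exchange(ref _slots[i], null);
            if (item != null) return item;
        }
        return _factory();   // pool empty: allocate a fresh instance
    }

    public void Free(T obj)
    {
        // Return to the first empty slot; if the pool is full the object is simply dropped.
        for (var i = 0; i < _slots.Length; i++)
        {
            if (Interlocked.CompareExchange(ref _slots[i], obj, null) == null) return;
        }
    }
}
```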
- - - - Number of bytes in small pool not currently in use - - - - - Number of bytes currently in use by stream from the small pool - - - - - Number of bytes in large pool not currently in use - - - - - Number of bytes currently in use by streams from the large pool - - - - - How many blocks are in the small pool - - - - - How many buffers are in the large pool - - - - - How many bytes of small free blocks to allow before we start dropping - those returned to us. - - - - - How many bytes of large free buffers to allow before we start dropping - those returned to us. - - - - - Maximum stream capacity in bytes. Attempts to set a larger capacity will - result in an exception. - - A value of 0 indicates no limit. - - - - Whether to save callstacks for stream allocations. This can help in debugging. - It should NEVER be turned on generally in production. - - - - - Whether dirty buffers can be immediately returned to the buffer pool. E.g. when GetBuffer() is called on - a stream and creates a single large buffer, if this setting is enabled, the other blocks will be returned - to the buffer pool immediately. - Note when enabling this setting that the user is responsible for ensuring that any buffer previously - retrieved from a stream which is subsequently modified is not used after modification (as it may no longer - be valid). - - - - - Removes and returns a single block from the pool. - - A byte[] array - - - - Returns a buffer of arbitrary size from the large buffer pool. This buffer - will be at least the requiredSize and always be a multiple of largeBufferMultiple. - - The minimum length of the buffer - The tag of the stream returning this buffer, for logging if necessary. - A buffer of at least the required size. - - - - Returns the buffer to the large pool - - The buffer to return. - The tag of the stream returning this buffer, for logging if necessary. - buffer is null - buffer.Length is not a multiple of LargeBufferMultiple (it did not originate from this pool) - - - - Returns the blocks to the pool - - Collection of blocks to return to the pool - The tag of the stream returning these blocks, for logging if necessary. - blocks is null - blocks contains buffers that are the wrong size (or null) for this memory manager - - - - Retrieve a new MemoryStream object with no tag and a default initial capacity. - - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and a default initial capacity. - - A tag which can be used to track the source of the stream. - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and at least the given capacity. - - A tag which can be used to track the source of the stream. - The minimum desired capacity for the stream. - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and at least the given capacity, possibly using - a single continugous underlying buffer. - - Retrieving a MemoryStream which provides a single contiguous buffer can be useful in situations - where the initial size is known and it is desirable to avoid copying data between the smaller underlying - buffers to a single large one. This is most helpful when you know that you will always call GetBuffer - on the underlying stream. - A tag which can be used to track the source of the stream. - The minimum desired capacity for the stream. - Whether to attempt to use a single contiguous buffer. - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and with contents copied from the provided - buffer. 
The provided buffer is not wrapped or used after construction. - - The new stream's position is set to the beginning of the stream when returned. - A tag which can be used to track the source of the stream. - The byte buffer to copy data from. - The offset from the start of the buffer to copy from. - The number of bytes to copy from the buffer. - A MemoryStream. - - - - Triggered when a new block is created. - - - - - Triggered when a new block is created. - - - - - Triggered when a new large buffer is created. - - - - - Triggered when a new stream is created. - - - - - Triggered when a stream is disposed. - - - - - Triggered when a stream is finalized. - - - - - Triggered when a stream is finalized. - - - - - Triggered when a user converts a stream to array. - - - - - Triggered when a large buffer is discarded, along with the reason for the discard. - - - - - Periodically triggered to report usage statistics. - - - - - MemoryStream implementation that deals with pooling and managing memory streams which use potentially large - buffers. - - - This class works in tandem with the RecylableMemoryStreamManager to supply MemoryStream - objects to callers, while avoiding these specific problems: - 1. LOH allocations - since all large buffers are pooled, they will never incur a Gen2 GC - 2. Memory waste - A standard memory stream doubles its size when it runs out of room. This - leads to continual memory growth as each stream approaches the maximum allowed size. - 3. Memory copying - Each time a MemoryStream grows, all the bytes are copied into new buffers. - This implementation only copies the bytes when GetBuffer is called. - 4. Memory fragmentation - By using homogeneous buffer sizes, it ensures that blocks of memory - can be easily reused. - - The stream is implemented on top of a series of uniformly-sized blocks. As the stream's length grows, - additional blocks are retrieved from the memory manager. It is these blocks that are pooled, not the stream - object itself. - - The biggest wrinkle in this implementation is when GetBuffer() is called. This requires a single - contiguous buffer. If only a single block is in use, then that block is returned. If multiple blocks - are in use, we retrieve a larger buffer from the memory manager. These large buffers are also pooled, - split by size--they are multiples of a chunk size (1 MB by default). - - Once a large buffer is assigned to the stream the blocks are NEVER again used for this stream. All operations take place on the - large buffer. The large buffer can be replaced by a larger buffer from the pool as needed. All blocks and large buffers - are maintained in the stream until the stream is disposed (unless AggressiveBufferReturn is enabled in the stream manager). - - - - - - All of these blocks must be the same size - - - - - This is only set by GetBuffer() if the necessary buffer is larger than a single block size, or on - construction if the caller immediately requests a single large buffer. - - If this field is non-null, it contains the concatenation of the bytes found in the individual - blocks. Once it is created, this (or a larger) largeBuffer will be used for the life of the stream. - - - - - This list is used to store buffers once they're replaced by something larger. - This is for the cases where you have users of this class that may hold onto the buffers longer - than they should and you want to prevent race conditions which could corrupt the data. 
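The RecyclableMemoryStreamManager/RecyclableMemoryStream docs above describe Microsoft's pooled MemoryStream design that ServiceStack.Text bundles. A small usage sketch against the upstream Microsoft.IO.RecyclableMemoryStream package, which exposes the same GetStream API described here (the package reference and tag are assumptions for illustration):

```csharp
using System.Text;
using Microsoft.IO;   // upstream package with the GetStream API the removed docs describe

class PooledStreamExample
{
    static void Main()
    {
        var manager = new RecyclableMemoryStreamManager();

        // The stream borrows uniform blocks from the manager instead of growing one big array,
        // avoiding LOH allocations and repeated copying as explained in the docs above.
        using (var stream = manager.GetStream("example-tag"))
        {
            var bytes = Encoding.UTF8.GetBytes("hello pooled stream");
            stream.Write(bytes, 0, bytes.Length);
        }   // Dispose returns the blocks to the manager's pools for reuse
    }
}
```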
- - - - - Unique identifier for this stream across it's entire lifetime - - Object has been disposed - - - - A temporary identifier for the current usage of this stream. - - Object has been disposed - - - - Gets the memory manager being used by this stream. - - Object has been disposed - - - - Callstack of the constructor. It is only set if MemoryManager.GenerateCallStacks is true, - which should only be in debugging situations. - - - - - Callstack of the Dispose call. It is only set if MemoryManager.GenerateCallStacks is true, - which should only be in debugging situations. - - - - - This buffer exists so that WriteByte can forward all of its calls to Write - without creating a new byte[] buffer on every call. - - - - - Allocate a new RecyclableMemoryStream object. - - The memory manager - - - - Allocate a new RecyclableMemoryStream object - - The memory manager - A string identifying this stream for logging and debugging purposes - - - - Allocate a new RecyclableMemoryStream object - - The memory manager - A string identifying this stream for logging and debugging purposes - The initial requested size to prevent future allocations - - - - Allocate a new RecyclableMemoryStream object - - The memory manager - A string identifying this stream for logging and debugging purposes - The initial requested size to prevent future allocations - An initial buffer to use. This buffer will be owned by the stream and returned to the memory manager upon Dispose. - - - - Returns the memory used by this stream back to the pool. - - Whether we're disposing (true), or being called by the finalizer (false) - This method is not thread safe and it may not be called more than once. - - - - Equivalent to Dispose - - - - - Gets or sets the capacity - - Capacity is always in multiples of the memory manager's block size, unless - the large buffer is in use. Capacity never decreases during a stream's lifetime. - Explicitly setting the capacity to a lower value than the current value will have no effect. - This is because the buffers are all pooled by chunks and there's little reason to - allow stream truncation. - - Object has been disposed - - - - Gets the number of bytes written to this stream. - - Object has been disposed - - - - Gets the current position in the stream - - Object has been disposed - - - - Whether the stream can currently read - - - - - Whether the stream can currently seek - - - - - Always false - - - - - Whether the stream can currently write - - - - - Returns a single buffer containing the contents of the stream. - The buffer may be longer than the stream length. - - A byte[] buffer - IMPORTANT: Doing a Write() after calling GetBuffer() invalidates the buffer. The old buffer is held onto - until Dispose is called, but the next time GetBuffer() is called, a new buffer from the pool will be required. - Object has been disposed - - - - Returns a new array with a copy of the buffer's contents. You should almost certainly be using GetBuffer combined with the Length to - access the bytes in this stream. Calling ToArray will destroy the benefits of pooled buffers, but it is included - for the sake of completeness. - - Object has been disposed - - - - Reads from the current position into the provided buffer - - Destination buffer - Offset into buffer at which to start placing the read bytes. - Number of bytes to read. 
- The number of bytes read - buffer is null - offset or count is less than 0 - offset subtracted from the buffer length is less than count - Object has been disposed - - - - Writes the buffer to the stream - - Source buffer - Start position - Number of bytes to write - buffer is null - offset or count is negative - buffer.Length - offset is not less than count - Object has been disposed - - - - Returns a useful string for debugging. This should not normally be called in actual production code. - - - - - Writes a single byte to the current position in the stream. - - byte value to write - Object has been disposed - - - - Reads a single byte from the current position in the stream. - - The byte at the current position, or -1 if the position is at the end of the stream. - Object has been disposed - - - - Sets the length of the stream - - value is negative or larger than MaxStreamLength - Object has been disposed - - - - Sets the position to the offset from the seek location - - How many bytes to move - From where - The new position - Object has been disposed - offset is larger than MaxStreamLength - Invalid seek origin - Attempt to set negative position - - - - Synchronously writes this stream's bytes to the parameter stream. - - Destination stream - Important: This does a synchronous write, which may not be desired in some situations - - - - Release the large buffer (either stores it for eventual release or returns it immediately). - - - - - A class to allow the conversion of doubles to string representations of - their exact decimal values. The implementation aims for readability over - efficiency. - - Courtesy of @JonSkeet - http://www.yoda.arachsys.com/csharp/DoubleConverter.cs - - - - - - - - How many digits are *after* the decimal point - - - - - Constructs an arbitrary decimal expansion from the given long. - The long must not be negative. - - - - - Multiplies the current expansion by the given amount, which should - only be 2 or 5. - - - - - Shifts the decimal point; a negative value makes - the decimal expansion bigger (as fewer digits come after the - decimal place) and a positive value makes the decimal - expansion smaller. - - - - - Removes leading/trailing zeroes from the expansion. - - - - - Converts the value to a proper decimal string representation. - - - - - Creates an instance of a Type from a string value - - - - - Determines whether the specified type is convertible from string. - - The type. - - true if the specified type is convertible from string; otherwise, false. - - - - - Parses the specified value. - - The value. - - - - - Parses the specified type. - - The type. - The value. - - - - - Useful extension method to get the Dictionary[string,string] representation of any POCO type. - - - - - - Recursively prints the contents of any POCO object in a human-friendly, readable format - - - - - - Print Dump to Console.WriteLine - - - - - Print string.Format to Console.WriteLine - - - - - Parses the specified value. - - The value. - - - - - Populate an object with Example data. - - - - - - - Populates the object with example data. - - - Tracks how deeply nested we are - - - - - Public Code API to register commercial license for ServiceStack. - - - - - Internal Utilities to verify licensing - - - - - Maps the path of a file in the context of a VS project - - the relative path - the absolute path - Assumes static content is two directories above the /bin/ directory, - eg. in a unit test scenario the assembly would be in /bin/Debug/. 
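Among the members documented above are the ServiceStack.Text diagnostic extensions that recursively dump any POCO in a human-readable format and print it to the console. A brief, hedged usage sketch (the Dump()/PrintDump() extensions ship with ServiceStack.Text, though their namespace has moved between versions):

```csharp
using System;
using ServiceStack;        // extension methods live here in later ServiceStack.Text versions
using ServiceStack.Text;   // ...and here in earlier ones

class DumpExample
{
    class Customer
    {
        public int Id { get; set; }
        public string Name { get; set; }
    }

    static void Main()
    {
        var customer = new Customer { Id = 1, Name = "Alice" };

        Console.WriteLine(customer.Dump());   // recursive, human-readable dump of the POCO
        customer.PrintDump();                 // shortcut that writes the dump to Console
    }
}
```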
- - - - Maps the path of a file in a self-hosted scenario - - the relative path - the absolute path - Assumes static content is copied to /bin/ folder with the assemblies - - - - Maps the path of a file in an Asp.Net hosted scenario - - the relative path - the absolute path - Assumes static content is in the parent folder of the /bin/ directory - - - - Implement the serializer using a more static approach - - - - - - Creates a new instance of type. - First looks at JsConfig.ModelFactory before falling back to CreateInstance - - - - - Creates a new instance of type. - First looks at JsConfig.ModelFactory before falling back to CreateInstance - - - - - Creates a new instance from the default constructor of type - - - - - Add a Property attribute at runtime. - Not threadsafe, should only add attributes on Startup. - - - - - Add a Property attribute at runtime. - Not threadsafe, should only add attributes on Startup. - - - - - @jonskeet: Collection of utility methods which operate on streams. - r285, February 26th 2009: http://www.yoda.arachsys.com/csharp/miscutil/ - - - - - Reads the given stream up to the end, returning the data as a byte - array. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer size. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer for transferring data. Note that the - current contents of the buffer is ignored, so the buffer needn't - be cleared beforehand. - - - - - Copies all the data from one stream into another. - - - - - Copies all the data from one stream into another, using a buffer - of the given size. - - - - - Copies all the data from one stream into another, using the given - buffer for transferring data. Note that the current contents of - the buffer is ignored, so the buffer needn't be cleared beforehand. - - - - - Reads exactly the given number of bytes from the specified stream. - If the end of the stream is reached before the specified amount - of data is read, an exception is thrown. - - - - - Reads into a buffer, filling it completely. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Same as ReadExactly, but without the argument checks. - - - - - Converts from base: 0 - 62 - - The source. - From. - To. 
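The @jonskeet stream utilities documented above read a stream to its end into a byte array and copy between streams with a caller-supplied buffer. Since the exact ServiceStack.Text signatures are not shown here, this is a minimal self-contained sketch of the read-to-end behaviour those docs describe, not the library's own method:

```csharp
using System.IO;

static class StreamReadHelpers
{
    // Read the remainder of a stream into a byte array using a fixed-size transfer buffer,
    // mirroring the ReadFully behaviour described in the removed StreamExtensions docs.
    public static byte[] ReadToEndBytes(Stream input, int bufferSize = 8192)
    {
        var buffer = new byte[bufferSize];
        using (var ms = new MemoryStream())
        {
            int read;
            while ((read = input.Read(buffer, 0, buffer.Length)) > 0)
                ms.Write(buffer, 0, read);
            return ms.ToArray();
        }
    }
}
```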
- - - - - Skip the encoding process for 'safe strings' - - - - - - diff --git a/lib/signed/ServiceStack.Text.dll b/lib/signed/ServiceStack.Text.dll deleted file mode 100644 index 5f398461..00000000 Binary files a/lib/signed/ServiceStack.Text.dll and /dev/null differ diff --git a/lib/tests/Moq.dll b/lib/tests/Moq.dll deleted file mode 100644 index abcb72ee..00000000 Binary files a/lib/tests/Moq.dll and /dev/null differ diff --git a/lib/tests/Moq.pdb b/lib/tests/Moq.pdb deleted file mode 100644 index 034ab186..00000000 Binary files a/lib/tests/Moq.pdb and /dev/null differ diff --git a/lib/tests/Northwind.Common.dll b/lib/tests/Northwind.Common.dll deleted file mode 100644 index b33ffb10..00000000 Binary files a/lib/tests/Northwind.Common.dll and /dev/null differ diff --git a/lib/tests/RazorEngine.dll b/lib/tests/RazorEngine.dll deleted file mode 100644 index 2b26c7d3..00000000 Binary files a/lib/tests/RazorEngine.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Client.dll b/lib/tests/ServiceStack.Client.dll deleted file mode 100644 index 903b0362..00000000 Binary files a/lib/tests/ServiceStack.Client.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Client.xml b/lib/tests/ServiceStack.Client.xml deleted file mode 100644 index 4f0a6b55..00000000 --- a/lib/tests/ServiceStack.Client.xml +++ /dev/null @@ -1,435 +0,0 @@ - - - - ServiceStack.Client - - - - Need to provide async request options - http://msdn.microsoft.com/en-us/library/86wf6409(VS.71).aspx - - - - The request filter is called before any request. - This request filter is executed globally. - - - - - The response action is called once the server response is available. - It will allow you to access raw response information. - This response action is executed globally. - Note that you should NOT consume the response stream as this is handled by ServiceStack - - - - - Called before request resend, when the initial request required authentication - - - - - The request filter is called before any request. - This request filter only works with the instance where it was set (not global). - - - - - The response action is called once the server response is available. - It will allow you to access raw response information. - Note that you should NOT consume the response stream as this is handled by ServiceStack - - - - - The ResultsFilter is called before the Request is sent allowing you to return a cached response. - - - - - The ResultsFilterResponse is called before returning the response allowing responses to be cached. - - - - - Called with requestUri, ResponseType when server returns 304 NotModified - - - - - Useful .NET Encryption Utils from: - https://msdn.microsoft.com/en-us/library/system.security.cryptography.rsacryptoserviceprovider(v=vs.110).aspx - - - - - Returns the next message from queueName or null if no message - - - - - - - Generic Proxy for service calls. - - The service Contract - - - - Returns the transparent proxy for the service call - - - - - Creates the error response from the values provided. - - If the errorCode is empty it will use the first validation error code, - if there is none it will throw an error. - - The error code. - The error message. - The validation errors. - - - - - Default MaxStringContentLength is 8k, and throws an exception when reached - - - - - Serializer cache of delegates required to create a type from a string map (e.g. 
for REST urls) - - - - - Gets the namespace from an attribute marked on the type's definition - - - Namespace of type - - - Need to provide async request options - http://msdn.microsoft.com/en-us/library/86wf6409(VS.71).aspx - - - - The request filter is called before any request. - This request filter is executed globally. - - - - - The response action is called once the server response is available. - It will allow you to access raw response information. - This response action is executed globally. - Note that you should NOT consume the response stream as this is handled by ServiceStack - - - - - Gets the collection of headers to be added to outgoing requests. - - - - - Sets all baseUri properties, using the Format property for the SyncReplyBaseUri and AsyncOneWayBaseUri - - Base URI of the service - - - - Whether to Accept Gzip,Deflate Content-Encoding and to auto decompress responses - - - - - The user name for basic authentication - - - - - The password for basic authentication - - - - - Sets the username and the password for basic authentication. - - - - - The Authorization Bearer Token to send with this request - - - - - Whether to execute async callbacks on the same Synchronization Context it was called from. - - - - - Gets or sets authentication information for the request. - Warning: It's recommened to use and for basic auth. - This property is only used for IIS level authentication. - - - - - Determines if the basic auth header should be sent with every request. - By default, the basic auth header is only sent when "401 Unauthorized" is returned. - - - - - Specifies if cookies should be stored - - - - - Called before request resend, when the initial request required authentication - - - - - The request filter is called before any request. - This request filter only works with the instance where it was set (not global). - - - - - The ResultsFilter is called before the Request is sent allowing you to return a cached response. - - - - - The ResultsFilterResponse is called before returning the response allowing responses to be cached. - - - - - Called with requestUri, ResponseType when server returns 304 NotModified - - - - - The response action is called once the server response is available. - It will allow you to access raw response information. - Note that you should NOT consume the response stream as this is handled by ServiceStack - - - - - Called by Send method if an exception occurs, for instance a System.Net.WebException because the server - returned an HTTP error code. Override if you want to handle specific exceptions or always want to parse the - response to a custom ErrorResponse DTO type instead of ServiceStack's ErrorResponse class. In case ex is a - System.Net.WebException, do not use - createWebRequest/getResponse/HandleResponse<TResponse> to parse the response - because that will result in the same exception again. Use - ThrowWebServiceException<YourErrorResponseType> to parse the response and to throw a - WebServiceException containing the parsed DTO. Then override Send to handle that exception. - - - - - APIs returning HttpWebResponse must be explicitly Disposed, e.g using (var res = client.Get(url)) { ... } - - - - - APIs returning HttpWebResponse must be explicitly Disposed, e.g using (var res = client.Get(url)) { ... } - - - - - Compresses the specified text using the default compression method: Deflate - - The text. - Type of the compression. 
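The ServiceClientBase members documented above (request/response filters, basic-auth credentials, BearerToken, StoreCookies, compression) are surfaced by the concrete HTTP clients such as JsonServiceClient. A short, hedged sketch of wiring these knobs up, assuming the standard ServiceStack.Client API (the base URL and header name below are hypothetical):

```csharp
using System;
using ServiceStack;   // JsonServiceClient (namespace differs in very old v3 releases)

class ClientConfigExample
{
    static void Main()
    {
        var client = new JsonServiceClient("https://example.org")   // hypothetical base URL
        {
            BearerToken = "my-token",   // sent as an Authorization: Bearer header
            StoreCookies = true         // keep session cookies across requests
        };

        // Per-instance request filter, invoked before every request this client sends.
        client.RequestFilter = req => req.Headers["X-Trace-Id"] = Guid.NewGuid().ToString();
    }
}
```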
- - - - - Compresses the specified text using the default compression method: Deflate - - - - - Decompresses the specified gz buffer using the default compression method: Inflate - - The gz buffer. - Type of the compression. - - - - - Decompresses the specified gz buffer using the default compression method: Inflate - - - - - Donated by Ivan Korneliuk from his post: - http://korneliuk.blogspot.com/2012/08/servicestack-reusing-dtos.html - - Modified to only allow using routes matching the supplied HTTP Verb - - - - - Generate a url from a Request DTO. Pretty URL generation require Routes to be defined using `[Route]` on the Request DTO - - - - - The exception which is thrown when a validation error occurred. - This validation is serialized in a extra clean and human-readable way by ServiceStack. - - - - - Returns the first error code - - The error code. - - - - Used if we need to serialize this exception to XML - - - - - - Encapsulates a validation result. - - - - - Gets or sets the success code. - - The success code. - - - - Gets or sets the error code. - - The error code. - - - - Gets or sets the success message. - - The success message. - - - - Gets or sets the error message. - - The error message. - - - - The errors generated by the validation. - - - - - Returns True if the validation was successful (errors list is empty). - - - - - Constructs a new ValidationResult - - - - - Constructs a new ValidationResult - - A list of validation results - - - - Initializes a new instance of the class. - - The errors. - The success code. - The error code. - - - - Merge errors from another - - - - - - Adds the singleton instance of to an endpoint on the client. - - - Based on http://megakemp.wordpress.com/2009/02/06/managing-shared-cookies-in-wcf/ - - - - - Adds the singleton of the class to the client endpoint's message inspectors. - - The endpoint that is to be customized. - The client runtime to be customized. - - - - Maintains a copy of the cookies contained in the incoming HTTP response received from any service - and appends it to all outgoing HTTP requests. - - - This class effectively allows to send any received HTTP cookies to different services, - reproducing the same functionality available in ASMX Web Services proxies with the class. - Based on http://megakemp.wordpress.com/2009/02/06/managing-shared-cookies-in-wcf/ - - - - - Initializes a new instance of the class. - - - - - Gets the singleton instance. - - - - - Inspects a message after a reply message is received but prior to passing it back to the client application. - - The message to be transformed into types and handed back to the client application. - Correlation state data. - - - - Inspects a message before a request message is sent to a service. - - The message to be sent to the service. - The client object channel. - - Null since no message correlation is used. 
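The URL-generation docs above note that pretty URLs are produced from `[Route]` definitions on the Request DTO. A brief sketch of that behaviour using the ToUrl() extension from ServiceStack.Client (the DTO and route below are made up for illustration):

```csharp
using System;
using ServiceStack;   // [Route], IReturn<T> and the ToUrl() extension

[Route("/hello/{Name}")]
public class Hello : IReturn<HelloResponse>
{
    public string Name { get; set; }
}

public class HelloResponse
{
    public string Result { get; set; }
}

class UrlFromDtoExample
{
    static void Main()
    {
        // The relative URL is generated from the DTO's [Route] template.
        var url = new Hello { Name = "World" }.ToUrl("GET");
        Console.WriteLine(url);   // expected: /hello/World
    }
}
```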
- - - - - Specifies if cookies should be stored - - - - - Naming convention for the request's Response DTO - - - - - Shortcut to get the ResponseStatus whether it's bare or inside a IHttpResult - - - - - - diff --git a/lib/tests/ServiceStack.Common.Tests.dll b/lib/tests/ServiceStack.Common.Tests.dll deleted file mode 100644 index dc558216..00000000 Binary files a/lib/tests/ServiceStack.Common.Tests.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Common.Tests.dll.config b/lib/tests/ServiceStack.Common.Tests.dll.config deleted file mode 100644 index 80ff7a6b..00000000 --- a/lib/tests/ServiceStack.Common.Tests.dll.config +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/lib/tests/ServiceStack.Common.Tests.pdb b/lib/tests/ServiceStack.Common.Tests.pdb deleted file mode 100644 index b3165d0d..00000000 Binary files a/lib/tests/ServiceStack.Common.Tests.pdb and /dev/null differ diff --git a/lib/tests/ServiceStack.Common.dll b/lib/tests/ServiceStack.Common.dll deleted file mode 100644 index 03bdb368..00000000 Binary files a/lib/tests/ServiceStack.Common.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Common.xml b/lib/tests/ServiceStack.Common.xml deleted file mode 100644 index abc4ab97..00000000 --- a/lib/tests/ServiceStack.Common.xml +++ /dev/null @@ -1,410 +0,0 @@ - - - - ServiceStack.Common - - - - - Categories of sql statements. - - - - - Unknown - - - - - DML statements that alter database state, e.g. INSERT, UPDATE - - - - - Statements that return a single record - - - - - Statements that iterate over a result set - - - - - A callback for ProfiledDbConnection and family - - - - - Called when a command starts executing - - - - - - - Called when a reader finishes executing - - - - - - - - Called when a reader is done iterating through the data - - - - - - Called when an error happens during execution of a command - - - - - - - - True if the profiler instance is active - - - - - Wraps a database connection, allowing sql execution timings to be collected when a session is started. - - - - - Returns a new that wraps , - providing query execution profiling. If profiler is null, no profiling will occur. - - Your provider-specific flavor of connection, e.g. SqlConnection, OracleConnection - The currently started or null. - Determines whether the ProfiledDbConnection will dispose the underlying connection. - - - - The underlying, real database connection to your db provider. - - - - - The current profiler instance; could be null. - - - - - The raw connection this is wrapping - - - - - Wrapper for a db provider factory to enable profiling - - - - - Every provider factory must have an Instance public field - - - - - Allow to re-init the provider factory. - - - - - - - proxy - - - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - proxy - - - - - Return T[0] when enumerable is null, safe to use in enumerations like foreach - - - - - Gets the textual description of the enum if it has one. e.g. - - - enum UserColors - { - [Description("Bright Red")] - BrightRed - } - UserColors.BrightRed.ToDescription(); - - - - - - - - Creates a Console Logger, that logs all messages to: System.Console - - Made public so its testable - - - - - Default logger is to Console.WriteLine - - Made public so its testable - - - - - Initializes a new instance of the class. - - - - - Initializes a new instance of the class. - - - - - Logs the specified message. 
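The logging entries above document ConsoleLogFactory/ConsoleLog (and, further below, DebugLogFactory), which plug into ServiceStack's ILog/LogManager abstraction. A minimal, hedged sketch of routing log output to the console with that API:

```csharp
using ServiceStack.Logging;

class LoggingExample
{
    static void Main()
    {
        // Route ServiceStack's ILog abstraction to System.Console, per the removed docs.
        LogManager.LogFactory = new ConsoleLogFactory();

        var log = LogManager.GetLogger(typeof(LoggingExample));
        log.Debug("debug message");
        log.ErrorFormat("operation failed with code {0}", 42);
    }
}
```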
- - - - - Logs the format. - - - - - Logs the specified message. - - - - - Creates a Debug Logger, that logs all messages to: System.Diagnostics.Debug - - Made public so its testable - - - - - Default logger is to System.Diagnostics.Debug.WriteLine - - Made public so its testable - - - - - Initializes a new instance of the class. - - - - - Initializes a new instance of the class. - - - - - Logs the specified message. - - - - - Logs the format. - - - - - Logs the specified message. - - - - - Func to get the Strongly-typed field - - - - - Required to cast the return ValueType to an object for caching - - - - - Func to set the Strongly-typed field - - - - - Required to cast the ValueType to an object for caching - - - - - Required to cast the ValueType to an object for caching - - - - - Func to get the Strongly-typed field - - - - - Required to cast the return ValueType to an object for caching - - - - - Func to set the Strongly-typed field - - - - - Required to cast the ValueType to an object for caching - - - - - Required to cast the ValueType to an object for caching - - - - - Useful IPAddressExtensions from: - http://blogs.msdn.com/knom/archive/2008/12/31/ip-address-calculations-with-c-subnetmasks-networks.aspx - - - - - - Gets the ipv4 addresses from all Network Interfaces that have Subnet masks. - - - - - - Gets the ipv6 addresses from all Network Interfaces. - - - - - - Common functionality when creating adapters - - - - - Executes the specified expression. - - - The action. - - - - - Executes the specified action (for void methods). - - The action. - - - - Note: InMemoryLog keeps all logs in memory, so don't use it long running exceptions - - Returns a thread-safe InMemoryLog which you can use while *TESTING* - to provide a detailed analysis of your logs. - - - - - Creates a Unified Resource Name (URN) with the following formats: - - - urn:{TypeName}:{IdFieldValue} e.g. urn:UserSession:1 - - urn:{TypeName}:{IdFieldName}:{IdFieldValue} e.g. urn:UserSession:UserId:1 - - - - - - - Provide the an option for the callee to block until all commands are executed - - - - - - - Invokes the action provided and returns true if no excpetion was thrown. - Otherwise logs the exception and returns false if an exception was thrown. - - The action. - - - - - Runs an action for a minimum of runForMs - - What to run - Minimum ms to run for - time elapsed in micro seconds - - - - Returns average microseconds an action takes when run for the specified runForMs - - What to run - How many times to run for each iteration - Minimum ms to run for - - - - - - - diff --git a/lib/tests/ServiceStack.Interfaces.dll b/lib/tests/ServiceStack.Interfaces.dll deleted file mode 100644 index 9495bd74..00000000 Binary files a/lib/tests/ServiceStack.Interfaces.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Interfaces.xml b/lib/tests/ServiceStack.Interfaces.xml deleted file mode 100644 index 8729e037..00000000 --- a/lib/tests/ServiceStack.Interfaces.xml +++ /dev/null @@ -1,1722 +0,0 @@ - - - - ServiceStack.Interfaces - - - - - A common interface implementation that is implemented by most cache providers - - - - - Removes the specified item from the cache. - - The identifier for the item to delete. - - true if the item was successfully removed from the cache; false otherwise. - - - - - Removes the cache for all the keys provided. - - The keys. - - - - Retrieves the specified item from the cache. - - - The identifier for the item to retrieve. - - The retrieved item, or null if the key was not found. 
- - - - - Increments the value of the specified key by the given amount. - The operation is atomic and happens on the server. - A non existent value at key starts at 0 - - The identifier for the item to increment. - The amount by which the client wants to increase the item. - - The new value of the item or -1 if not found. - - The item must be inserted into the cache before it can be changed. The item must be inserted as a . The operation only works with values, so -1 always indicates that the item was not found. - - - - Increments the value of the specified key by the given amount. - The operation is atomic and happens on the server. - A non existent value at key starts at 0 - - The identifier for the item to increment. - The amount by which the client wants to decrease the item. - - The new value of the item or -1 if not found. - - The item must be inserted into the cache before it can be changed. The item must be inserted as a . The operation only works with values, so -1 always indicates that the item was not found. - - - - Adds a new item into the cache at the specified cache key only if the cache is empty. - - The key used to reference the item. - The object to be inserted into the cache. - - true if the item was successfully stored in the cache; false otherwise. - - The item does not expire unless it is removed due memory pressure. - - - - Sets an item into the cache at the cache key specified regardless if it already exists or not. - - - - - Replaces the item at the cachekey specified only if an items exists at the location already. - - - - - Invalidates all data on the cache. - - - - - Retrieves multiple items from the cache. - The default value of T is set for all keys that do not exist. - - The list of identifiers for the items to retrieve. - - a Dictionary holding all items indexed by their key. - - - - - Sets multiple items to the cache. - - - The values. - - - - Removes items from cache that have keys matching the specified wildcard pattern - - The wildcard, where "*" means any sequence of characters and "?" means any single character. - - - - Removes items from the cache based on the specified regular expression pattern - - Regular expression pattern to search cache keys - - - - A light interface over a cache client. - This interface was inspired by Enyim.Caching.MemcachedClient - - Only the methods that are intended to be used are required, if you require - extra functionality you can uncomment the unused methods below as they have been - implemented in DdnMemcachedClient - - - - - Removes the specified item from the cache. - - The identifier for the item to delete. - - true if the item was successfully removed from the cache; false otherwise. - - - - - Removes the cache for all the keys provided. - - The keys. - - - - Retrieves the specified item from the cache. - - The identifier for the item to retrieve. - - The retrieved item, or null if the key was not found. - - - - - Increments the value of the specified key by the given amount. The operation is atomic and happens on the server. - - The identifier for the item to increment. - The amount by which the client wants to increase the item. - - The new value of the item or -1 if not found. - - The item must be inserted into the cache before it can be changed. The item must be inserted as a . The operation only works with values, so -1 always indicates that the item was not found. - - - - Increments the value of the specified key by the given amount. The operation is atomic and happens on the server. 
- - The identifier for the item to increment. - The amount by which the client wants to decrease the item. - - The new value of the item or -1 if not found. - - The item must be inserted into the cache before it can be changed. The item must be inserted as a . The operation only works with values, so -1 always indicates that the item was not found. - - - - Inserts an item into the cache with a cache key to reference its location. - - The key used to reference the item. - The object to be inserted into the cache. - - true if the item was successfully stored in the cache; false otherwise. - - The item does not expire unless it is removed due memory pressure. - - - - Inserts an item into the cache with a cache key to reference its location. - - The key used to reference the item. - The object to be inserted into the cache. - The time when the item is invalidated in the cache. - true if the item was successfully stored in the cache; false otherwise. - - - - Removes all data from the cache. - - - - - Retrieves multiple items from the cache. - - The list of identifiers for the items to retrieve. - - a Dictionary holding all items indexed by their key. - - - - - A Users Session - - - - - Set a typed value at key - - - - - - - - Get a typed value at key - - - - - - - - Store any object at key - - - - - - - Retrieves a User Session - - - - - Gets the session for this request, creates one if it doesn't exist. - - - - - - - - Gets the session for this request, creates one if it doesn't exist. - Only for ASP.NET apps. Uses the HttpContext.Current singleton. - - - - - Allow delegation of dependencies to other IOC's - - - - - Resolve Property Dependency - - - - - - - Resolve Constructor Dependency - - - - - - - BelongToAttribute - Use to indicate that a join column belongs to another table. - - - - - Compute attribute. - Use to indicate that a property is a Calculated Field - - - - - Mark types that are to be excluded from specified features - - - - - Decimal length attribute. - - - - - Explicit foreign key name. If it's null, or empty, the FK name will be autogenerated as before. - - - - - IgnoreAttribute - Use to indicate that a property is not a field in the table - properties with this attribute are ignored when building sql sentences - - - - - Primary key attribute. - use to indicate that property is part of the pk - - - - - Used to indicate that property is a row version incremented automatically by the database - - - - - Used to annotate an Entity with its DB schema - - - - - - For providers that want a cleaner API with a little more perf - - - - - - Decorate any type or property with adhoc info - - - - - Contract indication that the Response DTO has a ResponseStatus - - - - - Creates a Debug Logger, that logs all messages to: System.Diagnostics.Debug - - Made public so its testable - - - - - Factory to create ILog instances - - - - - Gets the logger. - - - - - Gets the logger. - - - - - Default logger is to System.Diagnostics.Debug.WriteLine - - Made public so its testable - - - - - Logs a message in a running application - - - - - Logs a Debug message. - - The message. - - - - Logs a Debug message and exception. - - The message. - The exception. - - - - Logs a Debug format message. - - The format. - The args. - - - - Logs a Error message. - - The message. - - - - Logs a Error message and exception. - - The message. - The exception. - - - - Logs a Error format message. - - The format. - The args. - - - - Logs a Fatal message. - - The message. - - - - Logs a Fatal message and exception. 
- - The message. - The exception. - - - - Logs a Error format message. - - The format. - The args. - - - - Logs an Info message and exception. - - The message. - - - - Logs an Info message and exception. - - The message. - The exception. - - - - Logs an Info format message. - - The format. - The args. - - - - Logs a Warning message. - - The message. - - - - Logs a Warning message and exception. - - The message. - The exception. - - - - Logs a Warning format message. - - The format. - The args. - - - - Gets or sets a value indicating whether this instance is debug enabled. - - - true if this instance is debug enabled; otherwise, false. - - - - - Initializes a new instance of the class. - - - - - Initializes a new instance of the class. - - - - - Logs the specified message. - - - - - Logs the format. - - - - - Logs the specified message. - - - - - Default logger is to System.Diagnostics.Debug.Print - - Made public so its testable - - - - - Initializes a new instance of the class. - - - - - Initializes a new instance of the class. - - - - - Logs the specified message. - - - - - Logs the format. - - - - - Logs the specified message. - - - - - Creates a Debug Logger, that logs all messages to: System.Diagnostics.Debug - - Made public so its testable - - - - - StringBuilderLog writes to shared StringBuffer. - Made public so its testable - - - - - Logs the specified message. - - - - - Logs the format. - - - - - Logs the specified message. - - The message. - - - - Creates a test Logger, that stores all log messages in a member list - - - - - Tests logger which stores all log messages in a member list which can be examined later - - Made public so its testable - - - - - Initializes a new instance of the class. - - The type. - - - - Initializes a new instance of the class. - - The type. - - - - Logs the specified message. - - The message. - The exception. - - - - Logs the format. - - The message. - The args. - - - - Logs the specified message. - - The message. - - - - Logging API for this library. You can inject your own implementation otherwise - will use the DebugLogFactory to write to System.Diagnostics.Debug - - - - - Gets the logger. - - - - - Gets the logger. - - - - - Gets or sets the log factory. - Use this to override the factory that is used to create loggers - - - - - Publish the specified message into the durable queue @queueName - - - - - Publish the specified message into the transient queue @queueName - - - - - Synchronous blocking get. - - - - - Non blocking get message - - - - - Acknowledge the message has been successfully received or processed - - - - - Negative acknowledgement the message was not processed correctly - - - - - Create a typed message from a raw MQ Response artefact - - - - - Create a temporary Queue for Request / Reply - - - - - - Simple definition of an MQ Host - - - - - Register DTOs and hanlders the MQ Host will process - - - - - - - Register DTOs and hanlders the MQ Host will process - - - - - - - - Get Total Current Stats for all Message Handlers - - - - - - Get the status of the service. Potential Statuses: Disposed, Stopped, Stopping, Starting, Started - - - - - - Get a Stats dump - - - - - - Start the MQ Host if not already started. - - - - - Stop the MQ Host if not already stopped. 
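The message-queue entries above (Publish/Get/Ack/Nak on the queue client, RegisterHandler/Start/Stop on the MQ host) describe the contract the Redis-backed MQ implementation in this repo fulfils. A hedged sketch of wiring it up; `RedisMqServer`, its namespace and the `retryCount` parameter are assumed from the ServiceStack.Redis of this era and may differ between releases:

```csharp
using System;
using ServiceStack.Messaging;
using ServiceStack.Redis;
using ServiceStack.Redis.Messaging; // assumed location of RedisMqServer in this era

public class Hello
{
    public string Name { get; set; }
}

class MqHostExample
{
    static void Main()
    {
        var clientsManager = new PooledRedisClientManager("localhost:6379");
        var mqServer = new RedisMqServer(clientsManager, retryCount: 2);

        // Register the DTO and the handler the MQ host will process
        mqServer.RegisterHandler<Hello>(m =>
        {
            Console.WriteLine("Hello, " + m.GetBody().Name);
            return null; // no reply message
        });

        mqServer.Start(); // start the MQ host if not already started

        // Publish a message into the durable queue for Hello
        using (var mqClient = mqServer.CreateMessageQueueClient())
        {
            mqClient.Publish(new Hello { Name = "World" });
        }

        Console.ReadLine();
        mqServer.Stop();  // stop the MQ host if not already stopped
    }
}
```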
- - - - - Factory to create consumers and producers that work with this service - - - - - Get a list of all message types registered on this MQ Host - - - - - Basic implementation of IMessage[T] - - - - - - Util static generic class to create unique queue names for types - - - - - - Util class to create unique queue names for runtime types - - - - - For messaging exceptions that should by-pass the messaging service's configured - retry attempts and store the message straight into the DLQ - - - - - Wrap the common redis list operations under a IList[string] interface. - - - - - Redis transaction for typed client - - - - - - interface to queueable operation using typed redis client - - - - - - Interface to redis typed pipeline - - - - - Pipeline interface shared by typed and non-typed pipelines - - - - - Interface to operations that allow queued commands to be completed - - - - - Returns a high-level typed client API - - - - - - Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - - - - - - Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - - - - - - Returns a Read/Write ICacheClient (The default) using the hosts defined in ReadWriteHosts - - - - - - Returns a ReadOnly ICacheClient using the hosts defined in ReadOnlyHosts. - - - - - - Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - - - - - - Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - - - - - - Returns a Read/Write ICacheClient (The default) using the hosts defined in ReadWriteHosts - - - - - - Returns a ReadOnly ICacheClient using the hosts defined in ReadOnlyHosts. - - - - - - Subscribe to channels by name - - - - - - Subscribe to channels matching the supplied patterns - - - - - - The number of active subscriptions this client has - - - - - Registered handler called after client *Subscribes* to each new channel - - - - - Registered handler called when each message is received - - - - - Registered handler called when each channel is unsubscribed - - - - - Interface to redis transaction - - - - - Base transaction interface, shared by typed and non-typed transactions - - - - - interface to operation that can queue commands - - - - - Interface to redis pipeline - - - - - Gets or sets parameter name with which allowable values will be associated. - - - - - The overall description of an API. Used by Swagger. - - - - - Gets or sets verb to which applies attribute. By default applies to all verbs. - - - - - Gets or sets parameter type: It can be only one of the following: path, query, body, model, or header. - - - - - Gets or sets unique name for the parameter. Each name must be unique, even if they are associated with different paramType values. - - - - Other notes on the name field: - If paramType is body, the name is used only for UI and codegeneration. - If paramType is path, the name field must correspond to the associated path segment from the path field in the api object. - If paramType is query, the name field corresponds to the query param name. - - - - - - Gets or sets the human-readable description for the parameter. - - - - - For path, query, and header paramTypes, this field must be a primitive. For body, this can be a complex or container datatype. - - - - - For path, this is always true. Otherwise, this field tells the client whether or not the field must be supplied. - - - - - For query params, this specifies that a comma-separated list of values can be passed to the API. 
For path and body types, this field cannot be true. - - - - - Gets or sets route to which applies attribute, matches using StartsWith. By default applies to all routes. - - - - - The status code of a response - - - - - The description of a response status code - - - - - Marker interfaces - - - - - Implement on Request DTOs that need access to the raw Request Stream - - - - - The raw Http Request Input Stream - - - - - Resolve a dependency from the AppHost's IOC - - - - - - - This interface can be implemented by an attribute - which adds an request filter for the specific request DTO the attribute marked. - - - - - The request filter is executed before the service. - - The http request wrapper - The http response wrapper - The request DTO - - - - A new shallow copy of this filter is used on every request. - - - - - - Order in which Request Filters are executed. - <0 Executed before global request filters - >0 Executed after global request filters - - - - - This interface can be implemented by an attribute - which adds an response filter for the specific response DTO the attribute marked. - - - - - The response filter is executed after the service - - The http request wrapper - The http response wrapper - - - - A new shallow copy of this filter is used on every request. - - - - - - Order in which Response Filters are executed. - <0 Executed before global response filters - >0 Executed after global response filters - - - - - The HTTP Response Status - - - - - The HTTP Response Status Code - - - - - The HTTP Status Description - - - - - The HTTP Response ContentType - - - - - Additional HTTP Headers - - - - - Response DTO - - - - - if not provided, get's injected by ServiceStack - - - - - Holds the request call context - - - - - The padding length written with the body, to be added to ContentLength of body - - - - - A thin wrapper around ASP.NET or HttpListener's HttpRequest - - - - - A thin wrapper around each host's Request e.g: ASP.NET, HttpListener, MQ, etc - - - - - The entire string contents of Request.InputStream - - - - - - The underlying ASP.NET or HttpListener HttpRequest - - - - - The name of the service being called (e.g. Request DTO Name) - - - - - The Verb / HttpMethod or Action for this request - - - - - Optional preferences for the processing of this Request - - - - - The Request DTO, after it has been deserialized. - - - - - The request ContentType - - - - - The expected Response ContentType for this request - - - - - Whether the ResponseContentType has been explicitly overrided or whether it was just the default - - - - - Attach any data to this request that all filters and services can access. - - - - - Buffer the Request InputStream so it can be re-read - - - - - The Remote Ip as reported by Request.UserHostAddress - - - - - The Remote Ip as reported by X-Forwarded-For, X-Real-IP or Request.UserHostAddress - - - - - e.g. 
is https or not - - - - - Access to the multi-part/formdata files posted on this request - - - - - The value of the Referrer, null if not available - - - - - The HttpResponse - - - - - The HTTP Verb - - - - - The IP Address of the X-Forwarded-For header, null if null or empty - - - - - The Port number of the X-Forwarded-Port header, null if null or empty - - - - - The http or https scheme of the X-Forwarded-Proto header, null if null or empty - - - - - The value of the X-Real-IP header, null if null or empty - - - - - A thin wrapper around ASP.NET or HttpListener's HttpResponse - - - - - A thin wrapper around each host's Response e.g: ASP.NET, HttpListener, MQ, etc - - - - - Write once to the Response Stream then close it. - - - - - - Signal that this response has been handled and no more processing should be done. - When used in a request or response filter, no more filters or processing is done on this request. - - - - - Calls Response.End() on ASP.NET HttpResponse otherwise is an alias for Close(). - Useful when you want to prevent ASP.NET to provide it's own custom error page. - - - - - Response.Flush() and OutputStream.Flush() seem to have different behaviour in ASP.NET - - - - - The underlying ASP.NET or HttpListener HttpResponse - - - - - The Response DTO - - - - - Buffer the Response OutputStream so it can be written in 1 batch - - - - - Gets a value indicating whether this instance is closed. - - - - - Log every service request - - - - - Log a request - - The RequestContext - Request DTO - Response DTO or Exception - How long did the Request take - - - - View the most recent logs - - - - - - - Turn On/Off Session Tracking - - - - - Turn On/Off Raw Request Body Tracking - - - - - Turn On/Off Tracking of Responses - - - - - Turn On/Off Tracking of Exceptions - - - - - Limit access to /requestlogs service to role - - - - - Don't log requests of these types. - - - - - Don't log request bodys for services with sensitive information. - By default Auth and Registration requests are hidden. - - - - - Implement on services that need access to the RequestContext - - - - - Responsible for executing the operation within the specified context. - - The operation types. - - - - Returns the first matching RestPath - - - - - Executes the MQ DTO request. - - - - - Executes the MQ DTO request with the supplied requestContext - - - - - Executes the DTO request under the supplied requestContext. - - - - - Executes the DTO request with an empty RequestContext. - - - - - Allow the registration of user-defined routes for services - - - - - Maps the specified REST path to the specified request DTO. - - The type of request DTO to map - the path to. - The path to map the request DTO to. - See RouteAttribute.Path - for details on the correct format. - The same instance; - never . - - - - Maps the specified REST path to the specified request DTO, and - specifies the HTTP verbs supported by the path. - - The type of request DTO to map - the path to. - The path to map the request DTO to. - See RouteAttribute.Path - for details on the correct format. - - The comma-delimited list of HTTP verbs supported by the path, - such as "GET,PUT,DELETE". Specify empty or - to indicate that all verbs are supported. - - The same instance; - never . - - - - Maps the specified REST path to the specified request DTO, - specifies the HTTP verbs supported by the path, and indicates - the default MIME type of the returned response. - - - The type of request DTO to map the path to. - - The path to map the request DTO to. 
- See RouteAttribute.Path - for details on the correct format. - - The comma-delimited list of HTTP verbs supported by the path, - such as "GET,PUT,DELETE". - - The same instance; - never . - - - - Maps the specified REST path to the specified request DTO, - specifies the HTTP verbs supported by the path, and indicates - the default MIME type of the returned response. - - - Used to rank the precedences of route definitions in reverse routing. - i.e. Priorities below 0 are auto-generated have less precedence. - - - - - Maps the specified REST path to the specified request DTO, - specifies the HTTP verbs supported by the path, and indicates - the default MIME type of the returned response. - - - The type of request DTO to map the path to. - - The path to map the request DTO to. - See RouteAttribute.Path - for details on the correct format. - - The comma-delimited list of HTTP verbs supported by the path, - such as "GET,PUT,DELETE". - - - The short summary of what the REST does. - - - The longer text to explain the behaviour of the REST. - - The same instance; - never . - - - - Used to decorate Request DTO's to associate a RESTful request - path mapping with a service. Multiple attributes can be applied to - each request DTO, to map multiple paths to the service. - - - - - Initializes an instance of the class. - - - The path template to map to the request. See - RouteAttribute.Path - for details on the correct format. - - - - - Initializes an instance of the class. - - - The path template to map to the request. See - RouteAttribute.Path - for details on the correct format. - - A comma-delimited list of HTTP verbs supported by the - service. If unspecified, all verbs are assumed to be supported. - - - - Gets or sets the path template to be mapped to the request. - - - A value providing the path mapped to - the request. Never . - - - Some examples of valid paths are: - - - "/Inventory" - "/Inventory/{Category}/{ItemId}" - "/Inventory/{ItemPath*}" - - - Variables are specified within "{}" - brackets. Each variable in the path is mapped to the same-named property - on the request DTO. At runtime, ServiceStack will parse the - request URL, extract the variable values, instantiate the request DTO, - and assign the variable values into the corresponding request properties, - prior to passing the request DTO to the service object for processing. - - It is not necessary to specify all request properties as - variables in the path. For unspecified properties, callers may provide - values in the query string. For example: the URL - "http://services/Inventory?Category=Books&ItemId=12345" causes the same - request DTO to be processed as "http://services/Inventory/Books/12345", - provided that the paths "/Inventory" (which supports the first URL) and - "/Inventory/{Category}/{ItemId}" (which supports the second URL) - are both mapped to the request DTO. - - Please note that while it is possible to specify property values - in the query string, it is generally considered to be less RESTful and - less desirable than to specify them as variables in the path. Using the - query string to specify property values may also interfere with HTTP - caching. - - The final variable in the path may contain a "*" suffix - to grab all remaining segments in the path portion of the request URL and assign - them to a single property on the request DTO. 
- For example, if the path "/Inventory/{ItemPath*}" is mapped to the request DTO, - then the request URL "http://services/Inventory/Books/12345" will result - in a request DTO whose ItemPath property contains "Books/12345". - You may only specify one such variable in the path, and it must be positioned at - the end of the path. - - - - - Gets or sets short summary of what the route does. - - - - - Gets or sets longer text to explain the behaviour of the route. - - - - - Gets or sets a comma-delimited list of HTTP verbs supported by the service, such as - "GET,PUT,POST,DELETE". - - - A providing a comma-delimited list of HTTP verbs supported - by the service, or empty if all verbs are supported. - - - - - Used to rank the precedences of route definitions in reverse routing. - i.e. Priorities below 0 are auto-generated have less precedence. - - - - - Fallback routes have the lowest precedence, i.e. after normal Routes, static files or any matching Catch All Handlers. - - - - - Generic ResponseStatus for when Response Type can't be inferred. - In schemaless formats like JSON, JSV it has the same shape as a typed Response DTO - - - - - A log entry added by the IRequestLogger - - - - - Decorate on Request DTO's to alter the accessibility of a service and its visibility on /metadata pages - - - - - Restrict access and metadata visibility to any of the specified access scenarios - - The restrict access to scenarios. - - - - Restrict access and metadata visibility to any of the specified access scenarios - - The restrict access to scenarios. - - - - Returns the allowed set of scenarios based on the user-specified restrictions - - - - - - - Allow access but hide from metadata to requests from Localhost only - - - - - Allow access but hide from metadata to requests from Localhost and Local Intranet only - - - - - Restrict access and hide from metadata to requests from Localhost only - - - - - Restrict access and hide from metadata to requests from Localhost and Local Intranet only - - - - - Restrict access and hide from metadata to requests from External only - - - - - Sets a single access restriction - - Restrict Access to. - - - - Restrict access to any of the specified access scenarios - - Access restrictions - - - - Sets a single metadata Visibility restriction - - Restrict metadata Visibility to. - - - - Restrict metadata visibility to any of the specified access scenarios - - Visibility restrictions - - - - Converts from a User intended restriction to a flag with all the allowed attribute flags set, e.g: - - If No Network restrictions were specified all Network access types are allowed, e.g: - restrict EndpointAttributes.None => ... 111 - - If a Network restriction was specified, only it will be allowed, e.g: - restrict EndpointAttributes.LocalSubnet => ... 010 - - The returned Enum will have a flag with all the allowed attributes set - - - - - - - Error information pertaining to a particular named field. - Used for returning multiple field validation errors.s - - - - - Common ResponseStatus class that should be present on all response DTO's - - - - - Initializes a new instance of the class. - - A response status without an errorcode == success - - - - - Initializes a new instance of the class. - - A response status with an errorcode == failure - - - - - Initializes a new instance of the class. - - A response status with an errorcode == failure - - - - - Holds the custom ErrorCode enum if provided in ValidationException - otherwise will hold the name of the Exception type, e.g. 
typeof(Exception).Name - - A value of non-null means the service encountered an error while processing the request. - - - - - A human friendly error message - - - - - - - - - - For multiple detailed validation errors. - Can hold a specific error message for each named field. - - - - - Write a partial content result - - - - - Whether this HttpResult allows Partial Response - - - - - Sends the specified request. - - The request. - - - - diff --git a/lib/tests/ServiceStack.Messaging.Tests.dll b/lib/tests/ServiceStack.Messaging.Tests.dll deleted file mode 100644 index ec56a075..00000000 Binary files a/lib/tests/ServiceStack.Messaging.Tests.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Messaging.Tests.pdb b/lib/tests/ServiceStack.Messaging.Tests.pdb deleted file mode 100644 index a64c0f9a..00000000 Binary files a/lib/tests/ServiceStack.Messaging.Tests.pdb and /dev/null differ diff --git a/lib/tests/ServiceStack.OrmLite.dll b/lib/tests/ServiceStack.OrmLite.dll deleted file mode 100644 index 236e7724..00000000 Binary files a/lib/tests/ServiceStack.OrmLite.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.OrmLite.xml b/lib/tests/ServiceStack.OrmLite.xml deleted file mode 100644 index 09ba33a5..00000000 --- a/lib/tests/ServiceStack.OrmLite.xml +++ /dev/null @@ -1,4355 +0,0 @@ - - - - ServiceStack.OrmLite - - - - - Represents the key aspects of a sql operation - - - - - The command (sql or a stored-procedure name) to execute - - - - - The parameters associated with the command - - - - - The active transaction for the command - - - - - The effective timeout for the command - - - - - The type of command that the command-text represents - - - - - Should data be buffered before returning? - - - - - Should the plan for this query be cached? - - - - - Additional state flags against this command - - - - - Can async queries be pipelined? - - - - - Initialize the command definition - - - - - For asynchronous operations, the cancellation-token - - - - - Additional state flags that control command behaviour - - - - - No additional flags - - - - - Should data be buffered before returning? - - - - - Can async queries be pipelined? - - - - - Should the plan cache be bypassed? - - - - - Implements custom property mapping by user provided criteria (usually presence of some custom attribute with column to member mapping) - - - - - Creates custom property mapping - - Target entity type - Property selector based on target type and DataReader column name - - - - Always returns default constructor - - DataReader column names - DataReader column types - Default constructor - - - - Always returns null - - - - - - Not implemented as far as default constructor used for all cases - - - - - - - - Returns property based on selector strategy - - DataReader column name - Poperty member map - - - - This class represents a SQL string, it can be used if you need to denote your parameter is a Char vs VarChar vs nVarChar vs nChar - - - - - Default value for IsAnsi. - - - - - A value to set the default value of strings - going through Dapper. Default is 4000, any value larger than this - field will not have the default value applied. - - - - - Create a new DbString - - - - - Ansi vs Unicode - - - - - Fixed length - - - - - Length of the string -1 for max - - - - - The value of the string - - - - - Add the parameter to the command... 
internal use only - - - - - - - Represents default type mapping strategy used by Dapper - - - - - Creates default type map - - Entity type - - - - Finds best constructor - - DataReader column names - DataReader column types - Matching constructor or default one - - - - Returns the constructor, if any, that has the ExplicitConstructorAttribute on it. - - - - - Gets mapping for constructor parameter - - Constructor to resolve - DataReader column name - Mapping implementation - - - - Gets member mapping for column - - DataReader column name - Mapping implementation - - - - Should column names like User_Id be allowed to match properties/fields like UserId ? - - - - - The settable properties for this typemap - - - - - A bag of parameters that can be passed to the Dapper Query and Execute methods - - - - - construct a dynamic parameter bag - - - - - construct a dynamic parameter bag - - can be an anonymous type or a DynamicParameters bag - - - - Append a whole object full of params to the dynamic - EG: AddDynamicParams(new {A = 1, B = 2}) // will add property A and B to the dynamic - - - - - - Add a parameter to this dynamic parameter list - - - - - Add a parameter to this dynamic parameter list - - - - - If true, the command-text is inspected and only values that are clearly used are included on the connection - - - - - Add all the parameters needed to the command just before it executes - - The raw command prior to execution - Information about the query - - - - All the names of the param in the bag, use Get to yank them out - - - - - Get the value of a parameter - - - - The value, note DBNull.Value is not returned, instead the value is returned as null - - - - Allows you to automatically populate a target property/field from output parameters. It actually - creates an InputOutput parameter, so you can still pass data in. - - - The object whose property/field you wish to populate. - A MemberExpression targeting a property/field of the target (or descendant thereof.) - - The size to set on the parameter. Defaults to 0, or DbString.DefaultLength in case of strings. - The DynamicParameters instance - - - - Tell Dapper to use an explicit constructor, passing nulls or 0s for all parameters - - - - - Handles variances in features per DBMS - - - - - Gets the feature set based on the passed connection - - - - - True if the db supports array columns e.g. Postgresql - - - - - Represents simple member map for one of target parameter or property or field to source DataReader column - - - - - Creates instance for simple property mapping - - DataReader column name - Target property - - - - Creates instance for simple field mapping - - DataReader column name - Target property - - - - Creates instance for simple constructor parameter mapping - - DataReader column name - Target constructor parameter - - - - DataReader column name - - - - - Target member type - - - - - Target property - - - - - Target field - - - - - Target constructor parameter - - - - - Used to pass a IEnumerable<SqlDataRecord> as a SqlDataRecordListTVPParameter - - - - - Create a new instance of SqlDataRecordListTVPParameter - - - - - Dapper, a light weight object mapper for ADO.NET - - - - - Execute a query asynchronously using .NET 4.5 Task. - - Note: each row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Execute a query asynchronously using .NET 4.5 Task. 
- - Note: each row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Execute a query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a query asynchronously using .NET 4.5 Task. - - - - - Execute a query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a single-row query asynchronously using .NET 4.5 Task. - - - - - Execute a command asynchronously using .NET 4.5 Task. - - - - - Execute a command asynchronously using .NET 4.5 Task. - - - - - Maps a query to objects - - The first type in the recordset - The second type in the recordset - The return type - - - - - - - The field we should split and read the second object from (default: id) - Number of seconds before command execution timeout - Is it a stored proc or a batch? 
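The async entries above cover the bundled Dapper surface: QueryAsync, the single-row QueryFirst/QuerySingle(OrDefault)Async variants, ExecuteAsync and the async multi-map overloads. A brief sketch of the common calls; it assumes the stock Dapper extension methods on IDbConnection (the vendored copy may expose them under a ServiceStack.OrmLite.Dapper namespace) and a hypothetical Person table:

```csharp
using System.Data;
using System.Threading.Tasks;
using Dapper; // the vendored copy may live under ServiceStack.OrmLite.Dapper instead

public class Person
{
    public int Id { get; set; }
    public string LastName { get; set; }
    public int Age { get; set; }
}

static class AsyncQueryExample
{
    public static async Task RunAsync(IDbConnection db)
    {
        // Execute a query asynchronously; each row is mapped to Person by column name
        var adults = await db.QueryAsync<Person>(
            "select Id, LastName, Age from Person where Age > @age", new { age = 40 });

        // Single-row variant; returns null when no row matches
        var one = await db.QueryFirstOrDefaultAsync<Person>(
            "select Id, LastName, Age from Person where Id = @id", new { id = 1 });

        // Execute a command asynchronously; returns the number of rows affected
        var updated = await db.ExecuteAsync(
            "update Person set Age = Age + 1 where Id = @id", new { id = 1 });
    }
}
```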
- - - - - Maps a query to objects - - The first type in the recordset - The second type in the recordset - The return type - - The field we should split and read the second object from (default: id) - The command to execute - - - - - - Maps a query to objects - - - - - - - - - - - - The Field we should split and read the second object from (default: id) - Number of seconds before command execution timeout - - - - - - Maps a query to objects - - - - - - - The field we should split and read the second object from (default: id) - The command to execute - - - - - - Perform a multi mapping query with 4 input parameters - - - - - - - - - - - - - - - - - - - - Perform a multi mapping query with 4 input parameters - - - - - - - - The field we should split and read the second object from (default: id) - The command to execute - - - - - - Perform a multi mapping query with 5 input parameters - - - - - Perform a multi mapping query with 5 input parameters - - - - - Perform a multi mapping query with 6 input parameters - - - - - Perform a multi mapping query with 6 input parameters - - - - - Perform a multi mapping query with 7 input parameters - - - - - Perform a multi mapping query with 7 input parameters - - - - - Perform a multi mapping query with arbitrary input parameters - - The return type - - - array of types in the recordset - - - - - The Field we should split and read the second object from (default: id) - Number of seconds before command execution timeout - Is it a stored proc or a batch? - - - - - Execute a command that returns multiple result sets, and access each in turn - - - - - Execute a command that returns multiple result sets, and access each in turn - - - - - Execute parameterized SQL and return an - - An that can be used to iterate over the results of the SQL query. - - This is typically used when the results of a query are not processed by Dapper, for example, used to fill a - or . - - - - - - - - - - Execute parameterized SQL and return an - - An that can be used to iterate over the results of the SQL query. - - This is typically used when the results of a query are not processed by Dapper, for example, used to fill a - or . - - - - - Execute parameterized SQL that selects a single value - - The first cell selected - - - - Execute parameterized SQL that selects a single value - - The first cell selected - - - - Execute parameterized SQL that selects a single value - - The first cell selected - - - - Execute parameterized SQL that selects a single value - - The first cell selected - - - - Called if the query cache is purged via PurgeQueryCache - - - - - Purge the query cache - - - - - Return a count of all the cached queries by dapper - - - - - - Return a list of all the queries cached by dapper - - - - - - - Deep diagnostics only: find any hash collisions in the cache - - - - - - Clear the registered type handlers - - - - - Configure the specified type to be mapped to a given db-type - - - - - Configure the specified type to be processed by a custom handler - - - - - Configure the specified type to be processed by a custom handler - - - - - Configure the specified type to be processed by a custom handler - - - - - Get the DbType that maps to a given value - - - - - OBSOLETE: For internal usage only. Lookup the DbType and handler for a given Type and member - - - - - Obtains the data as a list; if it is *already* a list, the original object is returned without - any duplication; otherwise, ToList() is invoked. 
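The multi-mapping overloads documented above join two or more types from a single result set, splitting the reader at a named column (default "id"). A sketch of the two-type form, assuming the standard `Query<TFirst, TSecond, TReturn>(sql, map, splitOn: ...)` signature; the Orders/Customers tables are illustrative:

```csharp
using System.Collections.Generic;
using System.Data;
using System.Linq;
using Dapper; // vendored copy may use a ServiceStack.OrmLite.Dapper namespace

public class Customer
{
    public int Id { get; set; }
    public string Name { get; set; }
}

public class Order
{
    public int Id { get; set; }
    public decimal Total { get; set; }
    public Customer Customer { get; set; }
}

static class MultiMapExample
{
    public static List<Order> LoadOrders(IDbConnection db)
    {
        const string sql = @"
            select o.Id, o.Total, c.Id, c.Name
            from Orders o
            join Customers c on c.Id = o.CustomerId";

        // The reader is split into an Order and a Customer at the second 'Id' column
        return db.Query<Order, Customer, Order>(
            sql,
            (order, customer) => { order.Customer = customer; return order; },
            splitOn: "Id").ToList();
    }
}
```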
- - - - - Execute parameterized SQL - - Number of rows affected - - - - Execute parameterized SQL - - Number of rows affected - - - - Execute parameterized SQL that selects a single value - - The first cell selected - - - - Execute parameterized SQL that selects a single value - - The first cell selected - - - - Execute parameterized SQL that selects a single value - - The first cell selected - - - - Execute parameterized SQL that selects a single value - - The first cell selected - - - - Execute parameterized SQL and return an - - An that can be used to iterate over the results of the SQL query. - - This is typically used when the results of a query are not processed by Dapper, for example, used to fill a - or . - - - - - - - - - - Execute parameterized SQL and return an - - An that can be used to iterate over the results of the SQL query. - - This is typically used when the results of a query are not processed by Dapper, for example, used to fill a - or . - - - - - Execute parameterized SQL and return an - - An that can be used to iterate over the results of the SQL query. - - This is typically used when the results of a query are not processed by Dapper, for example, used to fill a - or . - - - - - Return a sequence of dynamic objects with properties matching the columns - - Note: each row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Return a dynamic object with properties matching the columns - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Return a dynamic object with properties matching the columns - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Return a dynamic object with properties matching the columns - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Return a dynamic object with properties matching the columns - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Executes a query, returning the data typed as per T - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a single-row query, returning the data typed as per T - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a single-row query, returning the data typed as per T - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a single-row query, returning the data typed as per T - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). 
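The synchronous core follows the same pattern: Execute returns the number of rows affected, ExecuteScalar<T> returns the first cell, and Query<T> maps each row with the case-insensitive column-name === member-name match described above. A short sketch, reusing the Person DTO and assumptions from the async example earlier:

```csharp
using System.Collections.Generic;
using System.Data;
using System.Linq;
using Dapper; // vendored copy may use a ServiceStack.OrmLite.Dapper namespace

static class BasicQueryExample
{
    public static void Run(IDbConnection db)
    {
        // Execute parameterized SQL; returns the number of rows affected
        int inserted = db.Execute(
            "insert into Person (LastName, Age) values (@LastName, @Age)",
            new { LastName = "Smith", Age = 42 });

        // Execute parameterized SQL that selects a single value
        int count = db.ExecuteScalar<int>(
            "select count(*) from Person where Age > @age", new { age = 40 });

        // Query maps each row to Person by matching column names to members
        List<Person> people = db.Query<Person>(
            "select Id, LastName, Age from Person where Age > @age", new { age = 40 }).ToList();
    }
}
```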
- - - - - Executes a single-row query, returning the data typed as per T - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a single-row query, returning the data typed as per the Type suggested - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a single-row query, returning the data typed as per the Type suggested - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a single-row query, returning the data typed as per the Type suggested - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a single-row query, returning the data typed as per the Type suggested - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a single-row query, returning the data typed as per the Type suggested - - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a query, returning the data typed as per T - - the dynamic param may seem a bit odd, but this works around a major usability issue in vs, if it is Object vs completion gets annoying. Eg type new [space] get new object - A sequence of data of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a query, returning the data typed as per T - - the dynamic param may seem a bit odd, but this works around a major usability issue in vs, if it is Object vs completion gets annoying. Eg type new [space] get new object - A single instance or null of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a query, returning the data typed as per T - - the dynamic param may seem a bit odd, but this works around a major usability issue in vs, if it is Object vs completion gets annoying. 
Eg type new [space] get new object - A single or null instance of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a query, returning the data typed as per T - - the dynamic param may seem a bit odd, but this works around a major usability issue in vs, if it is Object vs completion gets annoying. Eg type new [space] get new object - A single instance of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Executes a query, returning the data typed as per T - - the dynamic param may seem a bit odd, but this works around a major usability issue in vs, if it is Object vs completion gets annoying. Eg type new [space] get new object - A single instance of the supplied type; if a basic type (int, string, etc) is queried then the data from the first column in assumed, otherwise an instance is - created per row, and a direct column-name===member-name mapping is assumed (case insensitive). - - - - - Execute a command that returns multiple result sets, and access each in turn - - - - - Execute a command that returns multiple result sets, and access each in turn - - - - - Maps a query to objects - - The first type in the record set - The second type in the record set - The return type - - - - - - - The Field we should split and read the second object from (default: id) - Number of seconds before command execution timeout - Is it a stored proc or a batch? - - - - - Maps a query to objects - - - - - - - - - - - - The Field we should split and read the second object from (default: id) - Number of seconds before command execution timeout - - - - - - Perform a multi mapping query with 4 input parameters - - - - - - - - - - - - - - - - - - - - Perform a multi mapping query with 5 input parameters - - - - - - - - - - - - - - - - - - - - - Perform a multi mapping query with 6 input parameters - - - - - - - - - - - - - - - - - - - - - - Perform a multi mapping query with 7 input parameters - - - - - - - - - - - - - - - - - - - - - - - Perform a multi mapping query with arbitrary input parameters - - The return type - - - array of types in the record set - - - - - The Field we should split and read the second object from (default: id) - Number of seconds before command execution timeout - Is it a stored proc or a batch? - - - - - Internal use only - - - - - - - Internal use only - - - - - Internal use only - - - - - Internal use only - - - - - OBSOLETE: For internal usage only. Sanitizes the paramter value with proper type casting. - - - - - Replace all literal tokens with their text form - - - - - Convert numeric values to their string form for SQL literal purposes - - - - - Internal use only - - - - - Gets type-map for the given type - - Type map instance, default is to create new instance of DefaultTypeMap - - - - Gets type-map for the given type - - Type map implementation, DefaultTypeMap instance if no override present - - - - Set custom mapping for type deserializers - - Entity type to override - Mapping rules impementation, null to remove custom map - - - - Internal use only - - - - - - - - - - - Throws a data exception, only used internally - - - - - How should connection strings be compared for equivalence? 
Defaults to StringComparer.Ordinal. - Providing a custom implementation can be useful for allowing multi-tenancy databases with identical - schema to share strategies. Note that usual equivalence rules apply: any equivalent connection strings - MUST yield the same hash-code. - - - - - Key used to indicate the type name associated with a DataTable - - - - - Used to pass a DataTable as a TableValuedParameter - - - - - Associate a DataTable with a type name - - - - - Fetch the type name associated with a DataTable - - - - - Used to pass a IEnumerable<SqlDataRecord> as a TableValuedParameter - - - - - Dummy type for excluding from multi-map - - - - - The grid reader provides interfaces for reading multiple result sets from a Dapper query - - - - - Read the next grid of results, returned as a dynamic object - - Note: each row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read an individual row of the next grid of results, returned as a dynamic object - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read an individual row of the next grid of results, returned as a dynamic object - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read an individual row of the next grid of results, returned as a dynamic object - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read an individual row of the next grid of results, returned as a dynamic object - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read the next grid of results, returned as a dynamic object - - Note: each row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read an individual row of the next grid of results, returned as a dynamic object - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read an individual row of the next grid of results, returned as a dynamic object - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read an individual row of the next grid of results, returned as a dynamic object - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read an individual row of the next grid of results, returned as a dynamic object - - Note: the row can be accessed via "dynamic", or by casting to an IDictionary<string,object> - - - - Read the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next 
grid of results - - - - - Read an individual row of the next grid of results - - - - - Read an individual row of the next grid of results - - - - - Read multiple objects from a single record set on the grid - - - - - Read multiple objects from a single record set on the grid - - - - - Read multiple objects from a single record set on the grid - - - - - Read multiple objects from a single record set on the grid - - - - - Read multiple objects from a single record set on the grid - - - - - Read multiple objects from a single record set on the grid - - - - - Read multiple objects from a single record set on the grid - - - - - Has the underlying reader been consumed? - - - - - The command associated with the reader - - - - - Dispose the grid, closing and disposing both the underlying reader and command. - - - - - Implement this interface to pass an arbitrary db specific parameter to Dapper - - - - - Add the parameter needed to the command before it executes - - The raw command prior to execution - Parameter name - - - - Parses a data reader to a sequence of data of the supplied type. Used for deserializing a reader without a connection, etc. - - - - - Parses a data reader to a sequence of data of the supplied type (as object). Used for deserializing a reader without a connection, etc. - - - - - Parses a data reader to a sequence of dynamic. Used for deserializing a reader without a connection, etc. - - - - - Gets the row parser for a specific row on a data reader. This allows for type switching every row based on, for example, a TypeId column. - You could return a collection of the base type but have each more specific. - - The data reader to get the parser for the current row from - The type to get the parser for - The start column index of the object (default 0) - The length of columns to read (default -1 = all fields following startIndex) - Return null if we can't find the first column? (default false) - A parser for this specific object from this row. - - - - Gets the row parser for a specific row on a data reader. This allows for type switching every row based on, for example, a TypeId column. - You could return a collection of the base type but have each more specific. - - The data reader to get the parser for the current row from - The type to get the parser for - The start column index of the object (default 0) - The length of columns to read (default -1 = all fields following startIndex) - Return null if we can't find the first column? (default false) - A parser for this specific object from this row. 
- - var result = new List<BaseType>(); - using (var reader = connection.ExecuteReader(@" - select 'abc' as Name, 1 as Type, 3.0 as Value - union all - select 'def' as Name, 2 as Type, 4.0 as Value")) - { - if (reader.Read()) - { - var toFoo = reader.GetRowParser<BaseType>(typeof(Foo)); - var toBar = reader.GetRowParser<BaseType>(typeof(Bar)); - var col = reader.GetOrdinal("Type"); - do - { - switch (reader.GetInt32(col)) - { - case 1: - result.Add(toFoo(reader)); - break; - case 2: - result.Add(toBar(reader)); - break; - } - } while (reader.Read()); - } - } - - abstract class BaseType - { - public abstract int Type { get; } - } - class Foo : BaseType - { - public string Name { get; set; } - public override int Type => 1; - } - class Bar : BaseType - { - public float Value { get; set; } - public override int Type => 2; - } - - - - - Identity of a cached query in Dapper, used for extensibility - - - - - Create an identity for use with DynamicParameters, internal use only - - - - - - - - - - - - - - The sql - - - - - The command type - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Compare 2 Identity objects - - - - - - - Implement this interface to pass an arbitrary db specific set of parameters to Dapper - - - - - Add all the parameters needed to the command just before it executes - - The raw command prior to execution - Information about the query - - - - Implements this interface to provide custom member mapping - - - - - Source DataReader column name - - - - - Target member type - - - - - Target property - - - - - Target field - - - - - Target constructor parameter - - - - - Extends IDynamicParameters with facilities for executing callbacks after commands have completed - - - - - Invoked when the command has executed - - - - - Extends IDynamicParameters providing by-name lookup of parameter values - - - - - Get the value of the specified parameter (return null if not found) - - - - - Implement this interface to perform custom type-based parameter handling and value parsing - - - - - Assign the value of a parameter before a command executes - - The parameter to configure - Parameter value - - - - Parse a database value back to a typed value - - The value from the database - The type to parse to - The typed value - - - - Implement this interface to change default mapping of reader columns to type members - - - - - Finds best constructor - - DataReader column names - DataReader column types - Matching constructor or default one - - - - Returns a constructor which should *always* be used. - - Parameters will be default values, nulls for reference types and zero'd for value types. - - Use this class to force object creation away from parameterless constructors you don't control. - - - - - Gets mapping for constructor parameter - - Constructor to resolve - DataReader column name - Mapping implementation - - - - Gets member mapping for column - - DataReader column name - Mapping implementation - - - - This is a micro-cache; suitable when the number of terms is controllable (a few hundred, for example), - and strictly append-only; you cannot change existing values. All key matches are on **REFERENCE** - equality. The type is fully thread-safe. - - - - - Represents a placeholder for a value that should be replaced as a literal value in the resulting sql - - - - - The text in the original command that should be replaced - - - - - The name of the member referred to by the token - - - - - Permits specifying certain SqlMapper values globally. 
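The ITypeHandler entries above (paired with the AddTypeHandler entries further up) let a custom handler convert between a .NET type and its database representation. A hedged sketch using the TypeHandler<T> base class described in the entries that follow; the SqlMapper class and namespace match stock Dapper and may differ in this vendored copy:

```csharp
using System;
using System.Data;
using Dapper; // vendored copy may use a ServiceStack.OrmLite.Dapper namespace

// Store a Uri as its string form and parse it back when reading
public class UriTypeHandler : SqlMapper.TypeHandler<Uri>
{
    // Assign the value of a parameter before a command executes
    public override void SetValue(IDbDataParameter parameter, Uri value)
    {
        parameter.Value = value == null ? (object)DBNull.Value : value.ToString();
        parameter.DbType = DbType.String;
    }

    // Parse a database value back to a typed value
    public override Uri Parse(object value)
    {
        return value == null || value is DBNull ? null : new Uri((string)value);
    }
}

static class TypeHandlerRegistration
{
    public static void Register()
    {
        // Configure the Uri type to be processed by the custom handler
        SqlMapper.AddTypeHandler(new UriTypeHandler());
    }
}
```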
- - - - - Resets all Settings to their default values - - - - - Specifies the default Command Timeout for all Queries - - - - - Indicates whether nulls in data are silently ignored (default) vs actively applied and assigned to members - - - - - Should list expansions be padded with null-valued parameters, to prevent query-plan saturation? For example, - an 'in @foo' expansion with 7, 8 or 9 values will be sent as a list of 10 values, with 3, 2 or 1 of them null. - The padding size is relative to the size of the list; "next 10" under 150, "next 50" under 500, - "next 100" under 1500, etc. - - - Caution: this should be treated with care if your DB provider (or the specific configuration) allows for null - equality (aka "ansi nulls off"), as this may change the intent of your query; as such, this is disabled by - default and must be enabled. - - - - - If set (non-negative), when performing in-list expansions of integer types ("where id in @ids", etc), switch to a string_split based - operation if there are more than this many elements. Note that this feautre requires SQL Server 2016 / compatibility level 130 (or above). - - - - - Base-class for simple type-handlers - - - - - Assign the value of a parameter before a command executes - - The parameter to configure - Parameter value - - - - Parse a database value back to a typed value - - The value from the database - The typed value - - - - Base-class for simple type-handlers that are based around strings - - - - - Parse a string into the expected type (the string will never be null) - - - - - Format an instace into a string (the instance will never be null) - - - - - Assign the value of a parameter before a command executes - - The parameter to configure - Parameter value - - - - Parse a database value back to a typed value - - The value from the database - The typed value - - - - Not intended for direct usage - - - - - Not intended for direct usage - - - - - Not intended for direct usage - - - - - A type handler for data-types that are supported by the underlying provider, but which need - a well-known UdtTypeName to be specified - - - - - Creates a new instance of UdtTypeHandler with the specified UdtTypeName - - - - - Used to pass a DataTable as a TableValuedParameter - - - - - Create a new instance of TableValuedParameter - - - - - Create a new instance of TableValuedParameter - - - - - Describes a reader that controls the lifetime of both a command and a reader, - exposing the downstream command/reader as properties. - - - - - Obtain the underlying reader - - - - - Obtain the underlying command - - - - - RDBMS Dialect this Converter is for. Injected at registration. - - - - - SQL Column Definiton used in CREATE Table. - - - - - Used in DB Params. Defaults to DbType.String - - - - - Quoted Value in SQL Statement - - - - - Customize how DB Param is initialized. Useful for supporting RDBMS-specific Types. - - - - - Parameterized value in parameterized queries - - - - - Value from DB to Populate on POCO Data Model with - - - - - Retrieve Value from ADO.NET IDataReader. Defaults to reader.GetValue() - - - - - For Types that are natively supported by RDBMS's and shouldn't be quoted - - - - - Returns results from using an SqlFormat query. E.g: - db.SelectFmt<Person>("Age > {0}", 40) - db.SelectFmt<Person>("SELECT * FROM Person WHERE Age > {0}", 40) - - - - - Returns a partial subset of results from the specified tableType using a SqlFormat query. 
E.g: - db.SelectFmt<EntityWithId>(typeof(Person), "Age > {0}", 40) - - - - - Returns a single scalar value using an SqlFormat query. E.g: - db.ScalarFmt<int>("SELECT COUNT(*) FROM Person WHERE Age > {0}", 40) - - - - - Returns the first column in a List using a SqlFormat query. E.g: - db.ColumnFmt<string>("SELECT LastName FROM Person WHERE Age = {0}", 27) - - - - - Returns the distinct first column values in a HashSet using an SqlFormat query. E.g: - db.ColumnDistinctFmt<int>("SELECT Age FROM Person WHERE Age < {0}", 50) - - - - - Returns an Dictionary<K, List<V>> grouping made from the first two columns using an SqlFormat query. E.g: - db.LookupFmt<int, string>("SELECT Age, LastName FROM Person WHERE Age < {0}", 50) - - - - - Returns a Dictionary from the first 2 columns: Column 1 (Keys), Column 2 (Values) using an SqlFormat query. E.g: - db.DictionaryFmt<int, string>("SELECT Id, LastName FROM Person WHERE Age < {0}", 50) - - - - - Returns true if the Query returns any records, using an SqlFormat query. E.g: - db.ExistsFmt<Person>("Age = {0}", 42) - db.ExistsFmt<Person>("SELECT * FROM Person WHERE Age = {0}", 50) - - - - - Returns true if the Query returns any records that match the SqlExpression lambda, E.g: - db.Exists<Person>(q => q.Where(x => x.Age < 50)) - - - - - Returns results from a Stored Procedure using an SqlFormat query. E.g: - - - - - - Returns results from using an SqlFormat query. E.g: - db.SelectFmt<Person>("Age > {0}", 40) - db.SelectFmt<Person>("SELECT * FROM Person WHERE Age > {0}", 40) - - - - - Returns a partial subset of results from the specified tableType using a SqlFormat query. E.g: - db.SelectFmt<EntityWithId>(typeof(Person), "Age > {0}", 40) - - - - - Returns a lazyily loaded stream of results using an SqlFilter query. E.g: - db.SelectLazyFmt<Person>("Age > {0}", 40) - - - - - Returns the first result using a SqlFormat query. E.g: - db.SingleFmt<Person>("Age = {0}", 42) - - - - - Returns a single scalar value using an SqlFormat query. E.g: - db.ScalarFmt<int>("SELECT COUNT(*) FROM Person WHERE Age > {0}", 40) - - - - - Returns the first column in a List using a SqlFormat query. E.g: - db.ColumnFmt<string>("SELECT LastName FROM Person WHERE Age = {0}", 27) - - - - - Returns the distinct first column values in a HashSet using an SqlFormat query. E.g: - db.ColumnDistinctFmt<int>("SELECT Age FROM Person WHERE Age < {0}", 50) - - - - - Returns an Dictionary<K, List<V>> grouping made from the first two columns using an SqlFormat query. E.g: - db.LookupFmt<int, string>("SELECT Age, LastName FROM Person WHERE Age < {0}", 50) - - - - - Returns a Dictionary from the first 2 columns: Column 1 (Keys), Column 2 (Values) using an SqlFormat query. E.g: - db.DictionaryFmt<int, string>("SELECT Id, LastName FROM Person WHERE Age < {0}", 50) - - - - - Returns true if the Query returns any records, using an SqlFormat query. E.g: - db.ExistsFmt<Person>("Age = {0}", 42) - db.ExistsFmt<Person>("SELECT * FROM Person WHERE Age = {0}", 50) - - - - - Returns true if the Query returns any records that match the SqlExpression lambda, E.g: - db.Exists<Person>(q => q.Where(x => x.Age < 50)) - - - - - Returns results from using an SqlExpression lambda. E.g: - db.Select<Person>(q => q.Where(x => x.Age > 40)) - - - - - Project results from a number of joined tables into a different model - - - - - Returns a single result from using an SqlExpression lambda. E.g: - db.Single<Person>(q => q.Where(x => x.Age == 42)) - - - - - Returns the first result using a SqlFormat query. 
E.g: - db.SingleFmt<Person>("Age = {0}", 42) - - - - - Returns the count of rows that match the SqlExpression lambda, E.g: - db.Count<Person>(q => q.Where(x => x.Age < 50)) - - - - - Returns results with references from using an SqlExpression lambda. E.g: - db.LoadSelectAsync<Person>(q => q.Where(x => x.Age > 40)) - - - - - Create a new SqlExpression builder allowing typed LINQ-like queries. - - - - - Returns results from using an SqlExpression lambda. E.g: - db.Select<Person>(q => q.Where(x => x.Age > 40)) - - - - - Project results from a number of joined tables into a different model - - - - - Project results from a number of joined tables into a different model - - - - - Returns a single result from using an SqlExpression lambda. E.g: - db.Single<Person>(q => q.Where(x => x.Age == 42)) - - - - - Returns the count of rows that match the SqlExpression lambda, E.g: - db.Count<Person>(q => q.Where(x => x.Age < 50)) - - - - - Returns results with references from using an SqlExpression lambda. E.g: - db.LoadSelect<Person>(q => q.Where(x => x.Age > 40)) - - - - - Returns results with references from using an SqlExpression lambda. E.g: - db.LoadSelect<Person>(q => q.Where(x => x.Age > 40), include: x => new { x.PrimaryAddress }) - - - - - Delete rows using a SqlFormat filter. E.g: - - number of rows deleted - - - - Delete rows from the runtime table type using a SqlFormat filter. E.g: - - db.DeleteFmt(typeof(Person), "Age = {0}", 27) - number of rows deleted - - - - Delete rows using a SqlFormat filter. E.g: - db.Delete<Person>("Age > {0}", 42) - - number of rows deleted - - - - Delete rows from the runtime table type using a SqlFormat filter. E.g: - - db.DeleteFmt(typeof(Person), "Age = {0}", 27) - number of rows deleted - - - - Insert only fields in POCO specified by the SqlExpression lambda. E.g: - db.InsertOnly(new Person { FirstName = "Amy", Age = 27 }, q => q.Insert(p => new { p.FirstName, p.Age })) - - - - - Using an SqlExpression to only Insert the fields specified, e.g: - - db.InsertOnly(new Person { FirstName = "Amy" }, q => q.Insert(p => new { p.FirstName })); - INSERT INTO "Person" ("FirstName") VALUES ('Amy'); - - - - - Use an SqlExpression to select which fields to update and construct the where expression, E.g: - - db.UpdateOnly(new Person { FirstName = "JJ" }, ev => ev.Update(p => p.FirstName).Where(x => x.FirstName == "Jimi")); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("FirstName" = 'Jimi') - - What's not in the update expression doesn't get updated. No where expression updates all rows. E.g: - - db.UpdateOnly(new Person { FirstName = "JJ", LastName = "Hendo" }, ev => ev.Update(p => p.FirstName)); - UPDATE "Person" SET "FirstName" = 'JJ' - - - - - Flexible Update method to succinctly execute a free-text update statement using optional params. E.g: - - db.Update<Person>(set:"FirstName = {0}".Params("JJ"), where:"LastName = {0}".Params("Hendrix")); - UPDATE "Person" SET FirstName = 'JJ' WHERE LastName = 'Hendrix' - - - - - Flexible Update method to succinctly execute a free-text update statement using optional params. E.g. - - db.Update(table:"Person", set: "FirstName = {0}".Params("JJ"), where: "LastName = {0}".Params("Hendrix")); - UPDATE "Person" SET FirstName = 'JJ' WHERE LastName = 'Hendrix' - - - - - Flexible Delete method to succinctly execute a delete statement using free-text where expression. E.g. 
- - db.Delete<Person>(where:"Age = {0}".Params(27)); - DELETE FROM "Person" WHERE Age = 27 - - - - - Flexible Delete method to succinctly execute a delete statement using free-text where expression. E.g. - - db.Delete(table:"Person", where: "Age = {0}".Params(27)); - DELETE FROM "Person" WHERE Age = 27 - - - - - Delete the rows that matches the where expression, e.g: - - db.Delete<Person>(ev => ev.Where(p => p.Age == 27)); - DELETE FROM "Person" WHERE ("Age" = 27) - - - - - Insert only fields in POCO specified by the SqlExpression lambda. E.g: - db.InsertOnly(new Person { FirstName = "Amy", Age = 27 }, q => q.Insert(p => new { p.FirstName, p.Age })) - - - - - Use an SqlExpression to select which fields to update and construct the where expression, E.g: - - db.UpdateOnly(new Person { FirstName = "JJ" }, ev => ev.Update(p => p.FirstName).Where(x => x.FirstName == "Jimi")); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("FirstName" = 'Jimi') - - What's not in the update expression doesn't get updated. No where expression updates all rows. E.g: - - db.UpdateOnly(new Person { FirstName = "JJ", LastName = "Hendo" }, ev => ev.Update(p => p.FirstName)); - UPDATE "Person" SET "FirstName" = 'JJ' - - - - - Flexible Update method to succinctly execute a free-text update statement using optional params. E.g: - - db.Update<Person>(set:"FirstName = {0}".Params("JJ"), where:"LastName = {0}".Params("Hendrix")); - UPDATE "Person" SET FirstName = 'JJ' WHERE LastName = 'Hendrix' - - - - - Flexible Update method to succinctly execute a free-text update statement using optional params. E.g. - - db.Update(table:"Person", set: "FirstName = {0}".Params("JJ"), where: "LastName = {0}".Params("Hendrix")); - UPDATE "Person" SET FirstName = 'JJ' WHERE LastName = 'Hendrix' - - - - - Flexible Delete method to succinctly execute a delete statement using free-text where expression. E.g. - - db.Delete<Person>(where:"Age = {0}".Params(27)); - DELETE FROM "Person" WHERE Age = 27 - - - - - Flexible Delete method to succinctly execute a delete statement using free-text where expression. E.g. - - db.Delete(table:"Person", where: "Age = {0}".Params(27)); - DELETE FROM "Person" WHERE Age = 27 - - - - - Delete the rows that matches the where expression, e.g: - - db.Delete<Person>(ev => ev.Where(p => p.Age == 27)); - DELETE FROM "Person" WHERE ("Age" = 27) - - - - - Using an SqlExpression to only Insert the fields specified, e.g: - - db.InsertOnly(new Person { FirstName = "Amy" }, q => q.Insert(p => new { p.FirstName })); - INSERT INTO "Person" ("FirstName") VALUES ('Amy'); - - - - - Tell ServiceStack to use ThreadStatic Items Collection for Context Scoped items. - Warning: ThreadStatic Items aren't pinned to the same request in async services which callback on different threads. - - - - - Gets a list of items for this context. - - - - - Returns results from the active connection. - - - - - Returns results from using sql. E.g: - db.Select<Person>("Age > 40") - db.Select<Person>("SELECT * FROM Person WHERE Age > 40") - - - - - Returns results from using a parameterized query. E.g: - db.Select<Person>("Age > @age", new { age = 40}) - db.Select<Person>("SELECT * FROM Person WHERE Age > @age", new[] { db.CreateParam("age",40) }) - - - - - Returns results from using a parameterized query. E.g: - db.Select<Person>("Age > @age", new { age = 40}) - db.Select<Person>("SELECT * FROM Person WHERE Age > @age", new { age = 40}) - - - - - Returns results from using a parameterized query. 
E.g: - db.Select<Person>("Age > @age", new Dictionary<string, object> { { "age", 40 } }) - db.Select<Person>("SELECT * FROM Person WHERE Age > @age", new Dictionary<string, object> { { "age", 40 } }) - - - - - Returns a partial subset of results from the specified tableType. E.g: - db.Select<EntityWithId>(typeof(Person)) - - - - - - Returns a partial subset of results from the specified tableType. E.g: - db.Select<EntityWithId>(typeof(Person), "Age = @age", new { age = 27 }) - - - - - - Returns results from using a single name, value filter. E.g: - db.Where<Person>("Age", 27) - - - - - Returns results from using an anonymous type filter. E.g: - db.Where<Person>(new { Age = 27 }) - - - - - Returns results using the supplied primary key ids. E.g: - db.SelectByIds<Person>(new[] { 1, 2, 3 }) - - - - - Query results using the non-default values in the supplied partially populated POCO example. E.g: - db.SelectNonDefaults(new Person { Id = 1 }) - - - - - Query results using the non-default values in the supplied partially populated POCO example. E.g: - db.SelectNonDefaults("Age > @Age", new Person { Age = 42 }) - - - - - Returns the first result using a parameterized query. E.g: - db.Single<Person>(new { Age = 42 }) - - - - - Returns results from using a single name, value filter. E.g: - db.Single<Person>("Age = @age", new[] { db.CreateParam("age",42) }) - - - - - Returns results from using a single name, value filter. E.g: - db.Single<Person>("Age = @age", new { age = 42 }) - - - - - Returns the first result using a primary key id. E.g: - db.SingleById<Person>(1) - - - - - Returns the first result using a name, value filter. E.g: - db.SingleWhere<Person>("Age", 42) - - - - - Returns a single scalar value using an SqlExpression. E.g: - db.Column<int>(db.From<Persion>().Select(x => Sql.Count("*")).Where(q => q.Age > 40)) - - - - - Returns a single scalar value using a parameterized query. E.g: - db.Scalar<int>("SELECT COUNT(*) FROM Person WHERE Age > @age", new[] { db.CreateParam("age",40) }) - - - - - Returns a single scalar value using a parameterized query. E.g: - db.Scalar<int>("SELECT COUNT(*) FROM Person WHERE Age > @age", new { age = 40 }) - - - - - Returns the distinct first column values in a HashSet using an SqlExpression. E.g: - db.Column<int>(db.From<Persion>().Select(x => x.LastName).Where(q => q.Age == 27)) - - - - - Returns the first column in a List using a SqlFormat query. E.g: - db.Column<string>("SELECT LastName FROM Person WHERE Age = @age", new[] { db.CreateParam("age",27) }) - - - - - Returns the first column in a List using a SqlFormat query. E.g: - db.Column<string>("SELECT LastName FROM Person WHERE Age = @age", new { age = 27 }) - - - - - - Returns the distinct first column values in a HashSet using an SqlFormat query. E.g: - db.ColumnDistinct<int>("SELECT Age FROM Person WHERE Age < @age", new[] { db.CreateParam("age",50) }) - - - - - Returns the distinct first column values in a HashSet using an SqlFormat query. E.g: - db.ColumnDistinct<int>("SELECT Age FROM Person WHERE Age < @age", new { age = 50 }) - - - - - - Returns an Dictionary<K, List<V>> grouping made from the first two columns using an parameterized query. E.g: - db.Lookup<int, string>("SELECT Age, LastName FROM Person WHERE Age < @age", new[] { db.CreateParam("age",50) }) - - - - - Returns an Dictionary<K, List<V>> grouping made from the first two columns using an parameterized query. 
E.g: - db.Lookup<int, string>("SELECT Age, LastName FROM Person WHERE Age < @age", new { age = 50 }) - - - - - - Returns a Dictionary from the first 2 columns: Column 1 (Keys), Column 2 (Values) using sql. E.g: - db.Dictionary<int, string>("SELECT Id, LastName FROM Person WHERE Age < @age", new[] { db.CreateParam("age",50) }) - - - - - Returns a Dictionary from the first 2 columns: Column 1 (Keys), Column 2 (Values) using sql. E.g: - db.Dictionary<int, string>("SELECT Id, LastName FROM Person WHERE Age < @age", new { age = 50 }) - - - - - Returns true if the Query returns any records that match the LINQ expression, E.g: - db.Exists<Person>(x => x.Age < 50) - - - - - Returns true if the Query returns any records that match the supplied SqlExpression, E.g: - db.Exists(db.From<Person>().Where(x => x.Age < 50)) - - - - - Returns true if the Query returns any records, using an SqlFormat query. E.g: - db.Exists<Person>(new { Age = 42 }) - - - - - Returns true if the Query returns any records, using a parameterized query. E.g: - db.Exists<Person>("Age = @age", new { age = 42 }) - db.Exists<Person>("SELECT * FROM Person WHERE Age = @age", new { age = 42 }) - - - - - Returns results from an arbitrary SqlExpression. E.g: - db.SqlList<Person>(db.From<Person>().Select("*").Where(q => q.Age < 50)) - - - - - Returns results from an arbitrary parameterized raw sql query. E.g: - db.SqlList<Person>("EXEC GetRockstarsAged @age", new { age = 50 }) - - - - - Returns results from an arbitrary parameterized raw sql query. E.g: - db.SqlList<Person>("EXEC GetRockstarsAged @age", new { age = 50 }) - - - - - Returns results from an arbitrary parameterized raw sql query. E.g: - db.SqlList<Person>("EXEC GetRockstarsAged @age", new Dictionary<string, object> { { "age", 42 } }) - - - - - Returns results from an arbitrary parameterized raw sql query with a dbCmd filter. E.g: - db.SqlList<Person>("EXEC GetRockstarsAged @age", dbCmd => ...) - - - - - - Returns the first column in a List using a parameterized query. E.g: - db.SqlColumn<string>("SELECT LastName FROM Person WHERE Age < @age", new[] { db.CreateParam("age",50) }) - - - - - Returns the first column in a List using a parameterized query. E.g: - db.SqlColumn<string>("SELECT LastName FROM Person WHERE Age < @age", new { age = 50 }) - - - - - Returns the first column in a List using a parameterized query. E.g: - db.SqlColumn<string>("SELECT LastName FROM Person WHERE Age < @age", new Dictionary<string, object> { { "age", 50 } }) - - - - - Returns a single Scalar value using an SqlExpression. E.g: - db.SqlScalar<int>(db.From<Person>().Select(Sql.Count("*")).Where(q => q.Age < 50)) - - - - - Returns a single Scalar value using a parameterized query. E.g: - db.SqlScalar<int>("SELECT COUNT(*) FROM Person WHERE Age < @age", new[] { db.CreateParam("age",50) }) - - - - - Returns a single Scalar value using a parameterized query. E.g: - db.SqlScalar<int>("SELECT COUNT(*) FROM Person WHERE Age < @age", new { age = 50 }) - - - - - Returns a single Scalar value using a parameterized query. E.g: - db.SqlScalar<int>("SELECT COUNT(*) FROM Person WHERE Age < @age", new Dictionary<string, object> { { "age", 50 } }) - - - - - Executes a raw sql non-query using sql. E.g: - var rowsAffected = db.ExecuteNonQueryAsync("UPDATE Person SET LastName={0} WHERE Id={1}".SqlFormat("WaterHouse", 7)) - - number of rows affected - - - - Executes a raw sql non-query using a parameterized query. 
E.g: - var rowsAffected = db.ExecuteNonQueryAsync("UPDATE Person SET LastName=@name WHERE Id=@id", new { name = "WaterHouse", id = 7 }) - - number of rows affected - - - - Executes a raw sql non-query using a parameterized query. - - number of rows affected - - - - Returns results from a Stored Procedure, using a parameterized query. - - - - - Returns the scalar result as a long. - - - - - Returns the first result with all its references loaded, using a primary key id. E.g: - db.LoadSingleById<Person>(1) - - - - - Returns the first result with all its references loaded, using a primary key id. E.g: - db.LoadSingleById<Person>(1, include = x => new { x.Address }) - - - - - Loads all the related references onto the instance. E.g: - db.LoadReferencesAsync(customer) - - - - - Execute any arbitrary raw SQL. - - number of rows affected - - - - Execute any arbitrary raw SQL with db params. - - number of rows affected - - - - Insert 1 POCO, use selectIdentity to retrieve the last insert AutoIncrement id (if any). E.g: - var id = db.Insert(new Person { Id = 1, FirstName = "Jimi }, selectIdentity:true) - - - - - Insert 1 or more POCOs in a transaction. E.g: - db.Insert(new Person { Id = 1, FirstName = "Tupac", LastName = "Shakur", Age = 25 }, - new Person { Id = 2, FirstName = "Biggie", LastName = "Smalls", Age = 24 }) - - - - - Insert a collection of POCOs in a transaction. E.g: - db.InsertAll(new[] { new Person { Id = 9, FirstName = "Biggie", LastName = "Smalls", Age = 24 } }) - - - - - Updates 1 POCO. All fields are updated except for the PrimaryKey which is used as the identity selector. E.g: - db.Update(new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 27 }) - - - - - Updates 1 or more POCOs in a transaction. E.g: - db.Update(new Person { Id = 1, FirstName = "Tupac", LastName = "Shakur", Age = 25 }, - new Person { Id = 2, FirstName = "Biggie", LastName = "Smalls", Age = 24 }) - - - - - Updates 1 or more POCOs in a transaction. E.g: - db.UpdateAll(new[] { new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 27 } }) - - - - - Delete rows using an anonymous type filter. E.g: - db.Delete<Person>(new { FirstName = "Jimi", Age = 27 }) - - number of rows deleted - - - - Delete 1 row using all fields in the filter. E.g: - db.Delete(new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 27 }) - - number of rows deleted - - - - Delete 1 or more rows in a transaction using all fields in the filter. E.g: - db.Delete(new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 27 }) - - - - - Delete 1 or more rows using only field with non-default values in the filter. E.g: - db.DeleteNonDefaults(new Person { FirstName = "Jimi", Age = 27 }) - - number of rows deleted - - - - Delete 1 or more rows in a transaction using only field with non-default values in the filter. E.g: - db.DeleteNonDefaults(new Person { FirstName = "Jimi", Age = 27 }, - new Person { FirstName = "Janis", Age = 27 }) - - number of rows deleted - - - - Delete 1 row by the PrimaryKey. E.g: - db.DeleteById<Person>(1) - - number of rows deleted - - - - Delete 1 row by the PrimaryKey where the rowVersion matches the optimistic concurrency field. - Will throw RowModefiedExeption if the - row does not exist or has a different row version. - E.g: db.DeleteById<Person>(1) - - - - - Delete all rows identified by the PrimaryKeys. E.g: - db.DeleteById<Person>(new[] { 1, 2, 3 }) - - number of rows deleted - - - - Delete all rows in the generic table type. 
E.g: - db.DeleteAll<Person>() - - number of rows deleted - - - - Delete all rows in the runtime table type. E.g: - db.DeleteAll(typeof(Person)) - - number of rows deleted - - - - Insert a new row or update existing row. Returns true if a new row was inserted. - Optional references param decides whether to save all related references as well. E.g: - db.SaveAsync(customer, references:true) - - true if a row was inserted; false if it was updated - - - - Insert new rows or update existing rows. Return number of rows added E.g: - db.SaveAsync(new Person { Id = 10, FirstName = "Amy", LastName = "Winehouse", Age = 27 }) - - number of rows added - - - - Insert new rows or update existing rows. Return number of rows added E.g: - db.SaveAllAsync(new [] { new Person { Id = 10, FirstName = "Amy", LastName = "Winehouse", Age = 27 } }) - - number of rows added - - - - Populates all related references on the instance with its primary key and saves them. Uses '(T)Id' naming convention. E.g: - db.SaveAllReferences(customer) - - - - - Populates the related references with the instance primary key and saves them. Uses '(T)Id' naming convention. E.g: - db.SaveReference(customer, customer.Orders) - - - - - Populates the related references with the instance primary key and saves them. Uses '(T)Id' naming convention. E.g: - db.SaveReference(customer, customer.Orders) - - - - - Populates the related references with the instance primary key and saves them. Uses '(T)Id' naming convention. E.g: - db.SaveReferences(customer, customer.Orders) - - - - - Returns results from using a LINQ Expression. E.g: - db.Select<Person>(x => x.Age > 40) - - - - - Returns results from using an SqlExpression lambda. E.g: - db.Select(db.From<Person>().Where(x => x.Age > 40)) - - - - - Project results from a number of joined tables into a different model - - - - - Returns results from using an SqlExpression lambda. E.g: - db.SelectAsync(db.From<Person>().Where(x => x.Age > 40)) - - - - - Returns a single result from using a LINQ Expression. E.g: - db.Single<Person>(x => x.Age == 42) - - - - - Returns results from using an SqlExpression lambda. E.g: - db.SingleAsync<Person>(x => x.Age > 40) - - - - - Returns results from using an SqlExpression lambda. E.g: - db.SingleAsync(db.From<Person>().Where(x => x.Age > 40)) - - - - - Returns a scalar result from using an SqlExpression lambda. E.g: - db.Scalar<Person, int>(x => Sql.Max(x.Age)) - - - - - Returns a scalar result from using an SqlExpression lambda. E.g: - db.Scalar<Person, int>(x => Sql.Max(x.Age), , x => x.Age < 50) - - - - - Returns the count of rows that match the LINQ expression, E.g: - db.Count<Person>(x => x.Age < 50) - - - - - Returns the count of rows that match the supplied SqlExpression, E.g: - db.Count(db.From<Person>().Where(x => x.Age < 50)) - - - - - Return the number of rows returned by the supplied expression - - - - - Return the number of rows returned by the supplied sql - - - - - Returns results with references from using a LINQ Expression. E.g: - db.LoadSelectAsync<Person>(x => x.Age > 40) - - - - - Returns results with references from using an SqlExpression lambda. 
E.g: - db.LoadSelectAsync(db.From<Person>().Where(x => x.Age > 40)) - - - - - Project results with references from a number of joined tables into a different model - - - - - Use an SqlExpression to select which fields to update and construct the where expression, E.g: - - var q = db.From>Person<()); - db.UpdateOnly(new Person { FirstName = "JJ" }, q.Update(p => p.FirstName).Where(x => x.FirstName == "Jimi")); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("FirstName" = 'Jimi') - - What's not in the update expression doesn't get updated. No where expression updates all rows. E.g: - - db.UpdateOnly(new Person { FirstName = "JJ", LastName = "Hendo" }, ev.Update(p => p.FirstName)); - UPDATE "Person" SET "FirstName" = 'JJ' - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - - db.UpdateOnlyAsync(() => new Person { FirstName = "JJ" }, where: p => p.LastName == "Hendrix"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - db.UpdateOnlyAsync(() => new Person { FirstName = "JJ" }); - UPDATE "Person" SET "FirstName" = 'JJ' - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - - db.UpdateOnlyAsync(() => new Person { FirstName = "JJ" }, db.From<Person>().Where(p => p.LastName == "Hendrix")); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - - db.UpdateOnly(new Person { FirstName = "JJ" }, p => p.FirstName, p => p.LastName == "Hendrix"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - db.UpdateOnly(new Person { FirstName = "JJ" }, p => p.FirstName); - UPDATE "Person" SET "FirstName" = 'JJ' - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - Numeric fields generates an increment sql which is usefull to increment counters, etc... - avoiding concurrency conflicts - - db.UpdateAddAsync(() => new Person { Age = 5 }, where: p => p.LastName == "Hendrix"); - UPDATE "Person" SET "Age" = "Age" + 5 WHERE ("LastName" = 'Hendrix') - - db.UpdateAddAsync(() => new Person { Age = 5 }); - UPDATE "Person" SET "Age" = "Age" + 5 - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - - db.UpdateOnly(new Person { FirstName = "JJ" }, new[]{ "FirstName" }, p => p.LastName == "Hendrix"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - Numeric fields generates an increment sql which is usefull to increment counters, etc... - avoiding concurrency conflicts - - db.UpdateAddAsync(() => new Person { Age = 5 }, db.From<Person>().Where(p => p.LastName == "Hendrix")); - UPDATE "Person" SET "Age" = "Age" + 5 WHERE ("LastName" = 'Hendrix') - - - - - Updates all non-default values set on item matching the where condition (if any). E.g - - db.UpdateNonDefaults(new Person { FirstName = "JJ" }, p => p.FirstName == "Jimi"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("FirstName" = 'Jimi') - - - - - Updates all values set on item matching the where condition (if any). 
E.g - - db.Update(new Person { Id = 1, FirstName = "JJ" }, p => p.LastName == "Hendrix"); - UPDATE "Person" SET "Id" = 1,"FirstName" = 'JJ',"LastName" = NULL,"Age" = 0 WHERE ("LastName" = 'Hendrix') - - - - - Updates all matching fields populated on anonymousType that matches where condition (if any). E.g: - - db.Update<Person>(new { FirstName = "JJ" }, p => p.LastName == "Hendrix"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - - - - Using an SqlExpression to only Insert the fields specified, e.g: - - db.InsertOnlyAsync(new Person { FirstName = "Amy" }, p => p.FirstName)); - INSERT INTO "Person" ("FirstName") VALUES ('Amy'); - - db.InsertOnlyAsync(new Person { Id =1 , FirstName="Amy" }, p => new { p.Id, p.FirstName })); - INSERT INTO "Person" ("Id", "FirstName") VALUES (1, 'Amy'); - - - - - Using an SqlExpression to only Insert the fields specified, e.g: - - db.InsertOnly(new Person { FirstName = "Amy" }, new[]{ "FirstName" })); - INSERT INTO "Person" ("FirstName") VALUES ('Amy'); - - - - - Using an SqlExpression to only Insert the fields specified, e.g: - - db.InsertOnlyAsync(() => new Person { FirstName = "Amy" })); - INSERT INTO "Person" ("FirstName") VALUES (@FirstName); - - - - - Delete the rows that matches the where expression, e.g: - - db.Delete<Person>(p => p.Age == 27); - DELETE FROM "Person" WHERE ("Age" = 27) - - - - - Delete the rows that matches the where expression, e.g: - - var q = db.From>Person<()); - db.Delete<Person>(q.Where(p => p.Age == 27)); - DELETE FROM "Person" WHERE ("Age" = 27) - - - - - Clear select expression. All properties will be selected. - - - - - set the specified selectExpression. - - - raw Select expression: "Select SomeField1, SomeField2 from SomeTable" - - - - - Set the specified selectExpression using matching fields. - - - Matching Fields: "SomeField1, SomeField2" - - - - - - Offset of the first row to return. The offset of the initial row is 0 - - - - - Number of rows returned by a SELECT statement - - - - - Set the specified offset and rows for SQL Limit clause. - - - Offset of the first row to return. The offset of the initial row is 0 - - - Number of rows returned by a SELECT statement - - - - - Set the specified offset and rows for SQL Limit clause where they exist. - - - Offset of the first row to return. The offset of the initial row is 0 - - - Number of rows returned by a SELECT statement - - - - - Set the specified rows for Sql Limit clause. - - - Number of rows returned by a SELECT statement - - - - - Clear Sql Limit clause - - - - - Clear Offset and Limit clauses. Alias for Limit() - - - - - - Fields to be updated. - - - List<string> containing Names of properties to be updated - - - - - Fields to be updated. - - - IEnumerable<string> containing Names of properties to be updated - - - - - Fields to be updated. - - - x=> x.SomeProperty1 or x=> new { x.SomeProperty1, x.SomeProperty2 } - - - - - Clear UpdateFields list ( all fields will be updated) - - - - - Fields to be inserted. - - - x=> x.SomeProperty1 or x=> new{ x.SomeProperty1, x.SomeProperty2} - - - objectWithProperties - - - - - fields to be inserted. - - - IList<string> containing Names of properties to be inserted - - - - - Clear InsertFields list ( all fields will be inserted) - - - - - Determines whether the expression is the parameter inside MemberExpression which should be compared with TrueExpression. 
- - Returns true if the specified expression is the parameter inside MemberExpression which should be compared with TrueExpression; - otherwise, false. - - - - Determines whether the expression is the parameter. - - Returns true if the specified expression is parameter; - otherwise, false. - - - - Determines whether the expression is a Parameter or Convert Expression. - - Returns true if the specified expression is parameter or convert; - otherwise, false. - - - - Enables the efficient, dynamic composition of query predicates. - - - - - Creates a predicate that evaluates to true. - - - - - Creates a predicate that evaluates to false. - - - - - Creates a predicate expression from the specified lambda expression. - - - - - Combines the first predicate with the second using the logical "and". - - - - - Combines the first predicate with the second using the logical "or". - - - - - Negates the predicate. - - - - - Combines the first expression with the second using the specified merge function. - - - - - Creates a new SqlExpression builder allowing typed LINQ-like queries. - Alias for SqlExpression. - - - - - Creates a new SqlExpression builder for the specified type using a user-defined FROM sql expression. - - - - - Open a Transaction in OrmLite - - - - - Open a Transaction in OrmLite - - - - - Create a managed OrmLite IDbCommand - - - - - Returns results from using a LINQ Expression. E.g: - db.Select<Person>(x => x.Age > 40) - - - - - Returns results from using an SqlExpression lambda. E.g: - db.Select(db.From<Person>().Where(x => x.Age > 40)) - - - - - Returns results from using an SqlExpression lambda. E.g: - db.Select(db.From<Person>().Where(x => x.Age > 40)) - - - - - Returns a single result from using a LINQ Expression. E.g: - db.Single<Person>(x => x.Age == 42) - - - - - Returns results from using an SqlExpression lambda. E.g: - db.Select<Person>(x => x.Age > 40) - - - - - Returns results from using an SqlExpression lambda. E.g: - db.Single(db.From<Person>().Where(x => x.Age > 40)) - - - - - Returns a scalar result from using an SqlExpression lambda. E.g: - db.Scalar<Person, int>(x => Sql.Max(x.Age)) - - - - - Returns a scalar result from using an SqlExpression lambda. E.g: - db.Scalar<Person, int>(x => Sql.Max(x.Age), , x => x.Age < 50) - - - - - Returns the count of rows that match the LINQ expression, E.g: - db.Count<Person>(x => x.Age < 50) - - - - - Returns the count of rows that match the supplied SqlExpression, E.g: - db.Count(db.From<Person>().Where(x => x.Age < 50)) - - - - - Return the number of rows returned by the supplied expression - - - - - Return the number of rows returned by the supplied sql - - - - - Return the number of rows returned by the supplied sql and db params - - - - - Returns results with references from using a LINQ Expression. E.g: - db.LoadSelect<Person>(x => x.Age > 40) - - - - - Returns results with references from using a LINQ Expression. E.g: - db.LoadSelect<Person>(x => x.Age > 40, include: x => new { x.PrimaryAddress }) - - - - - Returns results with references from using an SqlExpression lambda. E.g: - db.LoadSelect(db.From<Person>().Where(x => x.Age > 40)) - - - - - Returns results with references from using an SqlExpression lambda. E.g: - db.LoadSelect(db.From<Person>().Where(x => x.Age > 40), include:q.OnlyFields) - - - - - Returns results with references from using an SqlExpression lambda. 
E.g: - db.LoadSelect(db.From<Person>().Where(x => x.Age > 40), include: x => new { x.PrimaryAddress }) - - - - - Project results with references from a number of joined tables into a different model - - - - - Project results with references from a number of joined tables into a different model - - - - - Project results with references from a number of joined tables into a different model - - - - - Use an SqlExpression to select which fields to update and construct the where expression, E.g: - - var q = db.From>Person<()); - db.UpdateOnly(new Person { FirstName = "JJ" }, q.Update(p => p.FirstName).Where(x => x.FirstName == "Jimi")); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("FirstName" = 'Jimi') - - What's not in the update expression doesn't get updated. No where expression updates all rows. E.g: - - db.UpdateOnly(new Person { FirstName = "JJ", LastName = "Hendo" }, ev.Update(p => p.FirstName)); - UPDATE "Person" SET "FirstName" = 'JJ' - - - - - Update only fields in the specified expression that matches the where condition (if any), E.g: - - db.UpdateOnly(() => new Person { FirstName = "JJ" }, where: p => p.LastName == "Hendrix"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - db.UpdateOnly(() => new Person { FirstName = "JJ" }); - UPDATE "Person" SET "FirstName" = 'JJ' - - - - - Update only fields in the specified expression that matches the where condition (if any), E.g: - - db.UpdateOnly(() => new Person { FirstName = "JJ" }, db.From>Person<().Where(p => p.LastName == "Hendrix")); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - - db.UpdateOnly(new Person { FirstName = "JJ" }, p => p.FirstName, p => p.LastName == "Hendrix"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - db.UpdateOnly(new Person { FirstName = "JJ" }, p => p.FirstName); - UPDATE "Person" SET "FirstName" = 'JJ' - - db.UpdateOnly(new Person { FirstName = "JJ", Age = 27 }, p => new { p.FirstName, p.Age ); - UPDATE "Person" SET "FirstName" = 'JJ', "Age" = 27 - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - - db.UpdateOnly(new Person { FirstName = "JJ" }, new[]{ "FirstName" }, p => p.LastName == "Hendrix"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - Numeric fields generates an increment sql which is usefull to increment counters, etc... - avoiding concurrency conflicts - - db.UpdateAdd(() => new Person { Age = 5 }, where: p => p.LastName == "Hendrix"); - UPDATE "Person" SET "Age" = "Age" + 5 WHERE ("LastName" = 'Hendrix') - - db.UpdateAdd(() => new Person { Age = 5 }); - UPDATE "Person" SET "Age" = "Age" + 5 - - - - - Update record, updating only fields specified in updateOnly that matches the where condition (if any), E.g: - Numeric fields generates an increment sql which is usefull to increment counters, etc... - avoiding concurrency conflicts - - db.UpdateAdd(() => new Person { Age = 5 }, db.From<Person>().Where(p => p.LastName == "Hendrix")); - UPDATE "Person" SET "Age" = "Age" + 5 WHERE ("LastName" = 'Hendrix') - - - - - Updates all non-default values set on item matching the where condition (if any). 
E.g - - db.UpdateNonDefaults(new Person { FirstName = "JJ" }, p => p.FirstName == "Jimi"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("FirstName" = 'Jimi') - - - - - Updates all values set on item matching the where condition (if any). E.g - - db.Update(new Person { Id = 1, FirstName = "JJ" }, p => p.LastName == "Hendrix"); - UPDATE "Person" SET "Id" = 1,"FirstName" = 'JJ',"LastName" = NULL,"Age" = 0 WHERE ("LastName" = 'Hendrix') - - - - - Updates all matching fields populated on anonymousType that matches where condition (if any). E.g: - - db.Update<Person>(new { FirstName = "JJ" }, p => p.LastName == "Hendrix"); - UPDATE "Person" SET "FirstName" = 'JJ' WHERE ("LastName" = 'Hendrix') - - - - - Using an SqlExpression to only Insert the fields specified, e.g: - - db.InsertOnly(new Person { FirstName = "Amy" }, p => p.FirstName)); - INSERT INTO "Person" ("FirstName") VALUES ('Amy'); - - db.InsertOnly(new Person { Id =1 , FirstName="Amy" }, p => new { p.Id, p.FirstName })); - INSERT INTO "Person" ("Id", "FirstName") VALUES (1, 'Amy'); - - - - - Using an SqlExpression to only Insert the fields specified, e.g: - - db.InsertOnly(new Person { FirstName = "Amy" }, new[]{ "FirstName" })); - INSERT INTO "Person" ("FirstName") VALUES ('Amy'); - - - - - Using an SqlExpression to only Insert the fields specified, e.g: - - db.InsertOnly(() => new Person { FirstName = "Amy" })); - INSERT INTO "Person" ("FirstName") VALUES (@FirstName); - - - - - Delete the rows that matches the where expression, e.g: - - db.Delete<Person>(p => p.Age == 27); - DELETE FROM "Person" WHERE ("Age" = 27) - - - - - Delete the rows that matches the where expression, e.g: - - var q = db.From<Person>()); - db.Delete<Person>(q.Where(p => p.Age == 27)); - DELETE FROM "Person" WHERE ("Age" = 27) - - - - - Returns results from the active connection. - - - - - Returns results from using sql. E.g: - db.Select<Person>("Age > 40") - db.Select<Person>("SELECT * FROM Person WHERE Age > 40") - - - - - Returns results from using sql. E.g: - db.Select<Person>("SELECT * FROM Person WHERE Age > @age", new[] { db.CreateParam("age", 40) }) - - - - - Returns results from using a parameterized query. E.g: - db.Select<Person>("Age > @age", new { age = 40}) - db.Select<Person>("SELECT * FROM Person WHERE Age > @age", new { age = 40}) - - - - - Returns results from using a parameterized query. E.g: - db.Select<Person>("Age > @age", new Dictionary<string, object> { { "age", 40 } }) - db.Select<Person>("SELECT * FROM Person WHERE Age > @age", new Dictionary<string, object> { { "age", 40 } }) - - - - - Returns a partial subset of results from the specified tableType. E.g: - db.Select<EntityWithId>(typeof(Person)) - - - - - - Returns a partial subset of results from the specified tableType. E.g: - db.Select<EntityWithId>(typeof(Person)) - - - - - - Returns results from using a single name, value filter. E.g: - db.Where<Person>("Age", 27) - - - - - Returns results from using an anonymous type filter. E.g: - db.Where<Person>(new { Age = 27 }) - - - - - Returns results using the supplied primary key ids. E.g: - db.SelectByIds<Person>(new[] { 1, 2, 3 }) - - - - - Query results using the non-default values in the supplied partially populated POCO example. E.g: - db.SelectNonDefaults(new Person { Id = 1 }) - - - - - Query results using the non-default values in the supplied partially populated POCO example. E.g: - db.SelectNonDefaults("Age > @Age", new Person { Age = 42 }) - - - - - Returns a lazyily loaded stream of results. 
E.g: - db.SelectLazy<Person>() - - - - - Returns a lazyily loaded stream of results using a parameterized query. E.g: - db.SelectLazy<Person>("Age > @age", new { age = 40 }) - - - - - Returns a lazyily loaded stream of results using a parameterized query. E.g: - db.SelectLazy(db.From<Person>().Where(x => x == 40)) - - - - - Returns a stream of results that are lazily loaded using a parameterized query. E.g: - db.WhereLazy<Person>(new { Age = 27 }) - - - - - Returns the first result using a parameterized query. E.g: - db.Single<Person>(new { Age = 42 }) - - - - - Returns results from using a single name, value filter. E.g: - db.Single<Person>("Age = @age", new[] { db.CreateParam("age",40) }) - - - - - Returns results from using a single name, value filter. E.g: - db.Single<Person>("Age = @age", new { age = 42 }) - - - - - Returns the first result using a primary key id. E.g: - db.SingleById<Person>(1) - - - - - Returns the first result using a name, value filter. E.g: - db.SingleWhere<Person>("Age", 42) - - - - - Returns a single scalar value using an SqlExpression. E.g: - db.Column<int>(db.From<Persion>().Select(x => Sql.Count("*")).Where(q => q.Age > 40)) - - - - - Returns a single scalar value using a parameterized query. E.g: - db.Scalar<int>("SELECT COUNT(*) FROM Person WHERE Age > @age", new[] { db.CreateParam("age",40) }) - - - - - Returns a single scalar value using a parameterized query. E.g: - db.Scalar<int>("SELECT COUNT(*) FROM Person WHERE Age > @age", new { age = 40 }) - - - - - Returns the distinct first column values in a HashSet using an SqlExpression. E.g: - db.Column<int>(db.From<Persion>().Select(x => x.LastName).Where(q => q.Age == 27)) - - - - - Returns the first column in a List using a SqlFormat query. E.g: - db.Column<string>("SELECT LastName FROM Person WHERE Age = @age", new[] { db.CreateParam("age",27) }) - - - - - Returns the distinct first column values in a HashSet using an SqlExpression. E.g: - db.ColumnLazy<int>(db.From<Persion>().Select(x => x.LastName).Where(q => q.Age == 27)) - - - - - Returns the first column in a List using a SqlFormat query. E.g: - db.ColumnLazy<string>("SELECT LastName FROM Person WHERE Age = @age", new[] { db.CreateParam("age",27) }) - - - - - Returns the first column in a List using a SqlFormat query. E.g: - db.ColumnLazy<string>("SELECT LastName FROM Person WHERE Age = @age", new { age = 27 }) - - - - - Returns the first column in a List using a SqlFormat query. E.g: - db.Column<string>("SELECT LastName FROM Person WHERE Age = @age", new { age = 27 }) - - - - - - Returns the distinct first column values in a HashSet using an SqlFormat query. E.g: - db.ColumnDistinct<int>("SELECT Age FROM Person WHERE Age < @age", new { age = 50 }) - - - - - Returns the distinct first column values in a HashSet using an SqlFormat query. E.g: - db.ColumnDistinct<int>("SELECT Age FROM Person WHERE Age < @age", new[] { db.CreateParam("age",50) }) - - - - - - Returns an Dictionary<K, List<V>> grouping made from the first two columns using an parameterized query. E.g: - db.Lookup<int, string>("SELECT Age, LastName FROM Person WHERE Age < @age", new[] { db.CreateParam("age",50) }) - - - - - Returns an Dictionary<K, List<V>> grouping made from the first two columns using an parameterized query. E.g: - db.Lookup<int, string>("SELECT Age, LastName FROM Person WHERE Age < @age", new { age = 50 }) - - - - - - Returns a Dictionary from the first 2 columns: Column 1 (Keys), Column 2 (Values) using sql. 
E.g: - db.Dictionary<int, string>("SELECT Id, LastName FROM Person WHERE Age < @age", new { age = 50 }) - - - - - Returns true if the Query returns any records that match the LINQ expression, E.g: - db.Exists<Person>(x => x.Age < 50) - - - - - Returns true if the Query returns any records that match the supplied SqlExpression, E.g: - db.Exists(db.From<Person>().Where(x => x.Age < 50)) - - - - - Returns true if the Query returns any records, using an SqlFormat query. E.g: - db.Exists<Person>(new { Age = 42 }) - - - - - Returns true if the Query returns any records, using a parameterized query. E.g: - db.Exists<Person>("Age = @age", new { age = 42 }) - db.Exists<Person>("SELECT * FROM Person WHERE Age = @age", new { age = 42 }) - - - - - Returns results from an arbitrary SqlExpression. E.g: - db.SqlList<Person>(db.From<Person>().Select("*").Where(q => q.Age < 50)) - - - - - Returns results from an arbitrary parameterized raw sql query. E.g: - db.SqlList<Person>("EXEC GetRockstarsAged @age", new[] { db.CreateParam("age",50) }) - - - - - Returns results from an arbitrary parameterized raw sql query. E.g: - db.SqlList<Person>("EXEC GetRockstarsAged @age", new { age = 50 }) - - - - - Returns results from an arbitrary parameterized raw sql query. E.g: - db.SqlList<Person>("EXEC GetRockstarsAged @age", new Dictionary<string, object> { { "age", 42 } }) - - - - - Returns results from an arbitrary parameterized raw sql query with a dbCmd filter. E.g: - db.SqlList<Person>("EXEC GetRockstarsAged @age", dbCmd => ...) - - - - - Prepare Stored Procedure with Input parameters, optionally populated with Input Params. E.g: - var cmd = db.SqlProc("GetRockstarsAged", new { age = 42 }) - - - - - - Returns the first column in a List using a parameterized query. E.g: - db.SqlColumn<string>("SELECT LastName FROM Person WHERE Age < @age", new[] { db.CreateParam("age",50) }) - - - - - Returns the first column in a List using a parameterized query. E.g: - db.SqlColumn<string>("SELECT LastName FROM Person WHERE Age < @age", new { age = 50 }) - - - - - Returns the first column in a List using a parameterized query. E.g: - db.SqlColumn<string>("SELECT LastName FROM Person WHERE Age < @age", new Dictionary<string, object> { { "age", 50 } }) - - - - - Returns a single Scalar value using an SqlExpression. E.g: - db.SqlScalar<int>(db.From<Person>().Select(Sql.Count("*")).Where(q => q.Age < 50)) - - - - - Returns a single Scalar value using a parameterized query. E.g: - db.SqlScalar<int>("SELECT COUNT(*) FROM Person WHERE Age < @age", new[]{ db.CreateParam("age",50) }) - - - - - Returns a single Scalar value using a parameterized query. E.g: - db.SqlScalar<int>("SELECT COUNT(*) FROM Person WHERE Age < @age", new { age = 50 }) - - - - - Returns a single Scalar value using a parameterized query. E.g: - db.SqlScalar<int>("SELECT COUNT(*) FROM Person WHERE Age < @age", new Dictionary<string, object> { { "age", 50 } }) - - - - - Returns the last insert Id made from this connection. - - - - - Executes a raw sql non-query using sql. E.g: - var rowsAffected = db.ExecuteNonQuery("UPDATE Person SET LastName={0} WHERE Id={1}".SqlFormat("WaterHouse", 7)) - - number of rows affected - - - - Executes a raw sql non-query using a parameterized query. E.g: - var rowsAffected = db.ExecuteNonQuery("UPDATE Person SET LastName=@name WHERE Id=@id", new { name = "WaterHouse", id = 7 }) - - number of rows affected - - - - Executes a raw sql non-query using a parameterized query. 
- - number of rows affected - - - - Executes a raw sql non-query using a parameterized query with a dbCmd filter. E.g: - - number of rows affected - - - - Returns results from a Stored Procedure, using a parameterized query. - - - - - Returns results from a Stored Procedure using an SqlFormat query. E.g: - - - - - - Returns the scalar result as a long. - - - - - Returns the first result with all its references loaded, using a primary key id. E.g: - db.LoadSingleById<Person>(1, include = new[]{ "Address" }) - - - - - Returns the first result with all its references loaded, using a primary key id. E.g: - db.LoadSingleById<Person>(1, include = x => new{ x.Address }) - - - - - Loads all the related references onto the instance. E.g: - db.LoadReferences(customer) - - - - - Checks whether a Table Exists. E.g: - db.TableExists("Person") - - - - - Checks whether a Table Exists. E.g: - db.TableExists<Person>() - - - - - Checks whether a Table Column Exists. E.g: - db.ColumnExists("Age", "Person") - - - - - Checks whether a Table Column Exists. E.g: - db.ColumnExists<Person>(x => x.Age) - - - - - Create DB Tables from the schemas of runtime types. E.g: - db.CreateTables(typeof(Table1), typeof(Table2)) - - - - - Create DB Table from the schema of the runtime type. Use overwrite to drop existing Table. E.g: - db.CreateTable(true, typeof(Table)) - - - - - Only Create new DB Tables from the schemas of runtime types if they don't already exist. E.g: - db.CreateTableIfNotExists(typeof(Table1), typeof(Table2)) - - - - - Drop existing DB Tables and re-create them from the schemas of runtime types. E.g: - db.DropAndCreateTables(typeof(Table1), typeof(Table2)) - - - - - Create a DB Table from the generic type. Use overwrite to drop the existing table or not. E.g: - db.CreateTable<Person>(overwrite=false) //default - db.CreateTable<Person>(overwrite=true) - - - - - Only create a DB Table from the generic type if it doesn't already exist. E.g: - db.CreateTableIfNotExists<Person>() - - - - - Only create a DB Table from the runtime type if it doesn't already exist. E.g: - db.CreateTableIfNotExists(typeof(Person)) - - - - - Drop existing table if exists and re-create a DB Table from the generic type. E.g: - db.DropAndCreateTable<Person>() - - - - - Drop existing table if exists and re-create a DB Table from the runtime type. E.g: - db.DropAndCreateTable(typeof(Person)) - - - - - Drop any existing tables from their runtime types. E.g: - db.DropTables(typeof(Table1),typeof(Table2)) - - - - - Drop any existing tables from the runtime type. E.g: - db.DropTable(typeof(Person)) - - - - - Drop any existing tables from the generic type. E.g: - db.DropTable<Person>() - - - - - Get the last SQL statement that was executed. - - - - - Execute any arbitrary raw SQL. - - number of rows affected - - - - Execute any arbitrary raw SQL with db params. - - number of rows affected - - - - Insert 1 POCO, use selectIdentity to retrieve the last insert AutoIncrement id (if any). E.g: - var id = db.Insert(new Person { Id = 1, FirstName = "Jimi }, selectIdentity:true) - - - - - Insert 1 or more POCOs in a transaction. E.g: - db.Insert(new Person { Id = 1, FirstName = "Tupac", LastName = "Shakur", Age = 25 }, - new Person { Id = 2, FirstName = "Biggie", LastName = "Smalls", Age = 24 }) - - - - - Insert a collection of POCOs in a transaction. E.g: - db.InsertAll(new[] { new Person { Id = 9, FirstName = "Biggie", LastName = "Smalls", Age = 24 } }) - - - - - Updates 1 POCO. 
All fields are updated except for the PrimaryKey which is used as the identity selector. E.g: - db.Update(new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 27 }) - - - - - Updates 1 or more POCOs in a transaction. E.g: - db.Update(new Person { Id = 1, FirstName = "Tupac", LastName = "Shakur", Age = 25 }, - new Person { Id = 2, FirstName = "Biggie", LastName = "Smalls", Age = 24 }) - - - - - Updates 1 or more POCOs in a transaction. E.g: - db.UpdateAll(new[] { new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 27 } }) - - - - - Delete rows using an anonymous type filter. E.g: - db.Delete<Person>(new { FirstName = "Jimi", Age = 27 }) - - number of rows deleted - - - - Delete 1 row using all fields in the filter. E.g: - db.Delete(new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 27 }) - - number of rows deleted - - - - Delete 1 or more rows in a transaction using all fields in the filter. E.g: - db.Delete(new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 27 }) - - - - - Delete 1 or more rows using only field with non-default values in the filter. E.g: - db.DeleteNonDefaults(new Person { FirstName = "Jimi", Age = 27 }) - - number of rows deleted - - - - Delete 1 or more rows in a transaction using only field with non-default values in the filter. E.g: - db.DeleteNonDefaults(new Person { FirstName = "Jimi", Age = 27 }, - new Person { FirstName = "Janis", Age = 27 }) - - number of rows deleted - - - - Delete 1 row by the PrimaryKey. E.g: - db.DeleteById<Person>(1) - - number of rows deleted - - - - Delete 1 row by the PrimaryKey where the rowVersion matches the optimistic concurrency field. - Will throw RowModefiedExeption if the - row does not exist or has a different row version. - E.g: db.DeleteById<Person>(1) - - - - - Delete all rows identified by the PrimaryKeys. E.g: - db.DeleteById<Person>(new[] { 1, 2, 3 }) - - number of rows deleted - - - - Delete all rows in the generic table type. E.g: - db.DeleteAll<Person>() - - number of rows deleted - - - - Delete all rows provided. E.g: - db.DeleteAll<Person>(people) - - number of rows deleted - - - - Delete all rows in the runtime table type. E.g: - db.DeleteAll(typeof(Person)) - - number of rows deleted - - - - Delete rows using a SqlFormat filter. E.g: - db.Delete<Person>("Age > @age", new { age = 42 }) - - number of rows deleted - - - - Delete rows using a SqlFormat filter. E.g: - db.Delete<Person>("Age > @age", new { age = 42 }) - - number of rows deleted - - - - Insert a new row or update existing row. Returns true if a new row was inserted. - Optional references param decides whether to save all related references as well. E.g: - db.Save(customer, references:true) - - true if a row was inserted; false if it was updated - - - - Insert new rows or update existing rows. Return number of rows added E.g: - db.Save(new Person { Id = 10, FirstName = "Amy", LastName = "Winehouse", Age = 27 }) - - number of rows added - - - - Insert new rows or update existing rows. Return number of rows added E.g: - db.SaveAll(new [] { new Person { Id = 10, FirstName = "Amy", LastName = "Winehouse", Age = 27 } }) - - number of rows added - - - - Populates all related references on the instance with its primary key and saves them. Uses '(T)Id' naming convention. E.g: - db.SaveAllReferences(customer) - - - - - Populates the related references with the instance primary key and saves them. Uses '(T)Id' naming convention. 
E.g: - db.SaveReference(customer, customer.Orders) - - - - - Populates the related references with the instance primary key and saves them. Uses '(T)Id' naming convention. E.g: - db.SaveReference(customer, customer.Orders) - - - - - Populates the related references with the instance primary key and saves them. Uses '(T)Id' naming convention. E.g: - db.SaveReferences(customer, customer.Orders) - - - - - Generates inline UPDATE SQL Statement - - - - - Generates inline INSERT SQL Statement - - - - - Wrapper IDbConnection class to allow for connection sharing, mocking, etc. - - - - - Allow for mocking and unit testing by providing non-disposing - connection factory with injectable IDbCommand and IDbTransaction proxies - - - - - Force the IDbConnection to always return this IDbCommand - - - - - Force the IDbConnection to always return this IDbTransaction - - - - - Alias for OpenDbConnection - - - - - Alias for OpenDbConnection - - - - - Allow for code-sharing between OrmLite, IPersistenceProvider and ICacheClient - - - - - Gets the explicit Converter registered for a specific type - - - - - Return best matching converter, falling back to Enum, Value or Ref Type Converters - - - - - Quote the string so that it can be used inside an SQL-expression - Escape quotes inside the string - - - - - - - Nice SqlBuilder class by @samsaffron from Dapper.Contrib: - http://samsaffron.com/archive/2011/09/05/Digging+ourselves+out+of+the+mess+Linq-2-SQL+created - Modified to work in .NET 3.5 - - - - - Unquoted alias for the column or expression being selected. - - - - - The SQL expression being selected, including any necessary quoting. - - - - - Unquoted column name being selected. - - - - - Table name or alias used to prefix the column name, if any. Already quoted. - - - - diff --git a/lib/tests/ServiceStack.Redis.dll b/lib/tests/ServiceStack.Redis.dll deleted file mode 100644 index 7c31a96a..00000000 Binary files a/lib/tests/ServiceStack.Redis.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Redis.xml b/lib/tests/ServiceStack.Redis.xml deleted file mode 100644 index 467f087c..00000000 --- a/lib/tests/ServiceStack.Redis.xml +++ /dev/null @@ -1,1600 +0,0 @@ - - - - ServiceStack.Redis - - - - - Provides thread-safe retrievel of redis clients since each client is a new one. - Allows the configuration of different ReadWrite and ReadOnly hosts - - - BasicRedisClientManager for ICacheClient - - For more interoperabilty I'm also implementing the ICacheClient on - this cache client manager which has the affect of calling - GetCacheClient() for all write operations and GetReadOnlyCacheClient() - for the read ones. - - This works well for master-slave replication scenarios where you have - 1 master that replicates to multiple read slaves. - - - - - Gets or sets object key prefix. - - - - - Hosts can be an IP Address or Hostname in the format: host[:port] - e.g. 127.0.0.1:6379 - default is: localhost:6379 - - The write hosts. - The read hosts. - - - - - Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - - - - - - Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - - - - - - Courtesy of @marcgravell - http://code.google.com/p/protobuf-net/source/browse/trunk/protobuf-net/BufferPool.cs - - - - - Resolver strategy for resolving hosts and creating clients - - - - - Provides thread-safe pooling of redis client connections. 
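Taken together, the OrmLite helpers documented above (CreateTableIfNotExists, Insert, Update, Delete, Save) compose into a short CRUD flow. A minimal sketch follows; the Person POCO, connection string and OrmLiteConnectionFactory setup are illustrative assumptions rather than part of the documentation itself:

    using ServiceStack.OrmLite;

    var dbFactory = new OrmLiteConnectionFactory(
        "Server=localhost;Database=test;Trusted_Connection=True;",   // illustrative connection string
        SqlServerDialect.Provider);

    using (var db = dbFactory.OpenDbConnection())
    {
        db.CreateTableIfNotExists<Person>();                          // only creates the table if missing
        db.Insert(new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 27 });
        db.Update(new Person { Id = 1, FirstName = "Jimi", LastName = "Hendrix", Age = 28 });
        db.Delete<Person>(new { FirstName = "Jimi", Age = 28 });      // anonymous-type filter
    }

    public class Person
    {
        public int Id { get; set; }
        public string FirstName { get; set; }
        public string LastName { get; set; }
        public int Age { get; set; }
    }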
- - - - - Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - - - - - - Called within a lock - - - - - - Disposes the write client. - - The client. - - - - The client wraps the native redis operations into a more readable c# API. - - Where possible these operations are also exposed in common c# interfaces, - e.g. RedisClient.Lists => IList[string] - RedisClient.Sets => ICollection[string] - - - - - Creates a new instance of the Redis Client from NewFactoryFn. - - - - - Store object fields as a dictionary of values in a Hash value. - Conversion to Dictionary can be customized with RedisClient.ConvertToHashFn - - - - - Returns key with automatic object id detection in provided value with generic type. - - - - - - - Returns key with explicit object id. - - - - - - - Returns key with explicit object type and id. - - - - - - - - A complete redis command, with method to send command, receive response, and run callback on success or failure - - - - - Allows you to get Redis value operations to operate against POCO types. - - - - - - Use this to share the same redis connection with another - - The client. - - - - Queue of commands for redis typed client - - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). - - - - - Put "QUEUED" messages at back of queue - - - - - - Issue exec command (not queued) - - - - - callback for after result count is read in - - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Pipeline for redis typed client - - - - - - For interoperabilty GetCacheClient() and GetReadOnlyCacheClient() - return an ICacheClient wrapper around the redis manager which has the affect of calling - GetClient() for all write operations and GetReadOnlyClient() for the read ones. - - This works well for master-slave replication scenarios where you have - 1 master that replicates to multiple read slaves. - - - - - Ignore dispose on RedisClientsManager, which should be registered as a singleton - - - - - Useful wrapper IRedisClientsManager to cut down the boiler plate of most IRedisClient access - - - - - Creates a PubSubServer that uses a background thread to listen and process for - Redis Pub/Sub messages published to the specified channel. - Use optional callbacks to listen for message, error and life-cycle events. 
- Callbacks can be assigned later, then call Start() for PubSubServer to start listening for messages - - - - - A complete redis command, with method to send command, receive response, and run callback on success or failure - - - - - Redis command that does not get queued - - - - - Factory used to Create `RedisClient` instances - - - - - The default RedisClient Socket ConnectTimeout (default -1, None) - - - - - The default RedisClient Socket SendTimeout (default -1, None) - - - - - The default RedisClient Socket ReceiveTimeout (default -1, None) - - - - - Default Idle TimeOut before a connection is considered to be stale (default 240 secs) - - - - - The default RetryTimeout for auto retry of failed operations (default 10,000ms) - - - - - Default Max Pool Size for Pooled Redis Client Managers (default none) - - - - - The BackOff multiplier failed Auto Retries starts from (default 10ms) - - - - - The Byte Buffer Size to combine Redis Operations within (default 1450 bytes) - - - - - The Byte Buffer Size for Operations to use a byte buffer pool (default 500kb) - - - - - Whether Connections to Master hosts should be verified they're still master instances (default true) - - - - - The ConnectTimeout on clients used to find the next available host (default 200ms) - - - - - Skip ServerVersion Checks by specifying Min Version number, e.g: 2.8.12 => 2812, 2.9.1 => 2910 - - - - - How long to hold deactivated clients for before disposing their connection (default 1 min) - Dispose of deactivated Clients immediately with TimeSpan.Zero - - - - - Whether Debug Logging should log detailed Redis operations (default false) - - - - - Resets Redis Config and Redis Stats back to default values - - - - - Change to use a different IRedisClientsManager - - - - - Configure the Redis Connection String to use for a Redis Client Host - - - - - The configured Redis Client Manager this Sentinel managers - - - - - Fired when Sentinel fails over the Redis Client Manager to a new master - - - - - Fired when the Redis Sentinel Worker connection fails - - - - - Fired when the Sentinel worker receives a message from the Sentinel Subscription - - - - - Map the internal IP's returned by Sentinels to its external IP - - - - - Whether to routinely scan for other sentinel hosts (default true) - - - - - What interval to scan for other sentinel hosts (default 10 mins) - - - - - How long to wait after failing before connecting to next redis instance (default 250ms) - - - - - How long to retry connecting to hosts before throwing (default 60 secs) - - - - - How long to wait after consecutive failed connection attempts to master before forcing - a Sentinel to failover the current master (default 60 secs) - - - - - The Max Connection time for Sentinel Worker (default 100ms) - - - - - The Max TCP Socket Receive time for Sentinel Worker (default 100ms) - - - - - The Max TCP Socket Send time for Sentinel Worker (default 100ms) - - - - - Reset client connections when Sentinel reports redis instance is subjectively down (default true) - - - - - Reset client connections when Sentinel reports redis instance is objectively down (default true) - - - - - Initialize Sentinel Subscription and Configure Redis ClientsManager - - - - - Check if GetValidSentinel should try the next sentinel server - - - This will be true if the failures is less than either RedisSentinel.MaxFailures or the # of sentinels, whatever is greater - - - - Event that is fired when the sentinel subscription raises an event - - - - - - - Don't immediately kill connections of 
active clients after failover to give them a chance to dispose gracefully. - Deactivating clients are automatically cleared from the pool. - - - - - Total number of commands sent - - - - - Number of times the Redis Client Managers have FailoverTo() either by sentinel or manually - - - - - Number of times a Client was deactivated from the pool, either by FailoverTo() or exceptions on client - - - - - Number of times connecting to a Sentinel has failed - - - - - Number of times we've forced Sentinel to failover to another master due to - consecutive errors beyond sentinel.WaitBeforeForcingMasterFailover - - - - - Number of times a connecting to a reported Master wasn't actually a Master - - - - - Number of times no Masters could be found in any of the configured hosts - - - - - Number of Redis Client instances created with RedisConfig.ClientFactory - - - - - Number of times a Redis Client was created outside of pool, either due to overflow or reserved slot was overridden - - - - - Number of times Redis Sentinel reported a Subjective Down (sdown) - - - - - Number of times Redis Sentinel reported an Objective Down (sdown) - - - - - Number of times a Redis Request was retried due to Socket or Retryable exception - - - - - Number of times a Request succeeded after it was retried - - - - - Number of times a Retry Request failed after exceeding RetryTimeout - - - - - Total number of deactivated clients that are pending being disposed - - - - - Provides a redis connection pool that can be sharded - - - - - logical name - - - - - An arbitrary weight relative to other nodes - - - - logical name - An arbitrary weight relative to other nodes - redis nodes - - - - Provides sharding of redis client connections. - uses consistent hashing to distribute keys across connection pools - - - - - maps a key to a redis connection pool - - key to map - a redis connection pool - - - - Adds a node and maps points across the circle - - node to add - An arbitrary number, specifies how often it occurs relative to other targets. - - - - A variation of Binary Search algorithm. Given a number, matches the next highest number from the sorted array. - If a higher number does not exist, then the first number in the array is returned. - - a sorted array to perform the search - number to find the next highest number against - next highest number - - - - Given a key, generates an unsigned 64 bit hash code using MD5 - - - - - - - Provides access to the method reflection data as part of the before/after event - - - - - Stores details about the context in which an IRedisClient is allocated. - - - - - - Tracks each IRedisClient instance allocated from the IRedisClientsManager logging when they are allocated and disposed. - Periodically writes the allocated instances to the log for diagnostic purposes. 
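The RedisSentinel settings and failover counters documented above are easier to follow with a concrete setup. A minimal sketch, assuming a local Sentinel deployment on ports 26380-26382 and the default master name "mymaster" (both illustrative); exact property signatures should be checked against your version:

    using System;
    using ServiceStack.Redis;

    var sentinel = new RedisSentinel(
        new[] { "127.0.0.1:26380", "127.0.0.1:26381", "127.0.0.1:26382" }, "mymaster")
    {
        ScanForOtherSentinels = true,   // routinely discover additional sentinel hosts
        OnFailover = manager => Console.WriteLine("Sentinel failed the pool over to a new master"),
    };

    // Initializes the Sentinel subscription and configures the pooled client manager
    IRedisClientsManager redisManager = sentinel.Start();

    using (var redis = redisManager.GetClient())
    {
        redis.IncrementValue("deploys");
    }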
- - - - - Distributed lock interface - - - - - distributed lock class that follows the Resource Allocation Is Initialization pattern - - - - - Lock - - - - in seconds - in seconds - - - - unlock - - - - - acquire distributed, non-reentrant lock on key - - global key for this lock - timeout for acquiring lock - timeout for lock, in seconds (stored as value against lock key) - - - - - - unlock key - - - - - - - - - - - - - Locking strategy interface - - - - - This class manages a read lock for a local readers/writer lock, - using the Resource Acquisition Is Initialization pattern - - - - - RAII initialization - - - - - - RAII disposal - - - - - This class manages a write lock for a local readers/writer lock, - using the Resource Acquisition Is Initialization pattern - - - - - - RAII disposal - - - - - Optimized implementation. Primitive types are manually serialized, the rest are serialized using binary serializer />. - - - - - - - - - - - - - - - - - - - serialize value and wrap with - - - - - - - Unwrap object wrapped in - - - - - - - - - - - - pop numProcessed items from queue and unlock queue for work item id that dequeued - items are associated with - - - - - - A dequeued work item has been processed. When all of the dequeued items have been processed, - all items will be popped from the queue,and the queue unlocked for the work item id that - the dequeued items are associated with - - - - - Update first unprocessed item with new work item. - - - - - - distributed work item queue. Each message must have an associated - work item id. For a given id, all work items are guaranteed to be processed - in the order in which they are received. - - - - - distributed work item queue. Each message must have an associated - work item id. For a given id, all work items are guaranteed to be processed - in the order in which they are received. - - - - - - - - - - - - - - - - - - - - - - - - - - - - Queue incoming messages - - - - - - - Must call this periodically to move work items from priority queue to pending queue - - - - - Replace existing work item in workItemId queue - - - - - - - - Pop items from list - - - - - - - Force release of locks held by crashed servers - - - - - release lock held by crashed server - - - - true if lock is released, either by this method or by another client; false otherwise - - - - Unlock work item id, so other servers can process items for this id - - - - - - pop remaining items that were returned by dequeue, and unlock queue - - - - - - indicate that an item has been processed by the caller - - - - - Update first unprocessed work item - - - - - - distributed work item queue. Messages are processed in chronological order - - - - - Enqueue incoming messages - - - - - - - - Dequeue next batch of work items - - - - - - - - - simple distributed work item queue - - - - - - - Queue incoming messages - - - - - - Dequeue next batch of work items for processing. After this method is called, - no other work items with same id will be available for - dequeuing until PostDequeue is called - - KeyValuePair: key is work item id, and value is list of dequeued items. 
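For the distributed, non-reentrant lock described above, the higher-level IRedisClient.AcquireLock helper wraps the acquire/unlock pair in a disposable, following the same resource-acquisition-is-initialization pattern. A minimal sketch; host and key names are assumptions:

    using System;
    using ServiceStack.Redis;

    var redisManager = new BasicRedisClientManager("localhost:6379");

    using (var redis = redisManager.GetClient())
    // waits up to 30s to acquire the lock; Dispose releases it
    using (redis.AcquireLock("lock:order-import", TimeSpan.FromSeconds(30)))
    {
        // critical section: only one client across the cluster holds "lock:order-import"
    }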
- - - - - distributed work item queue - - - - - customize the client serializer - - - - - Serialize object to buffer - - serializable object - - - - - - - array of serializable objects - - - - - Deserialize buffer to object - - byte array to deserialize - - - - - - Enqueue item in priority queue corresponding to workItemId identifier - - - - - - - Preprare next work item id for dequeueing - - - - - Dequeue up to maxBatchSize items from queue corresponding to workItemId identifier. - Once this method is called, or will not - return any items for workItemId until the dequeue lock returned is unlocked. - - - - - - - - Replace existing work item in workItemId queue - - - - - - - - Enqueue item - - - - - - Dequeue up to maxBatchSize items from queue - - - - - - - wraps a serialized representation of an object - - - - - - Initializes a new instance of . - - Custom item data. - The serialized item. - - - - The data representing the item being stored/retireved. - - - - - Flags set for this instance. - - - - - Represents a generic collection of key/value pairs that are ordered independently of the key and value. - - The type of the keys in the dictionary - The type of the values in the dictionary - - - - Adds an entry with the specified key and value into the IOrderedDictionary<TKey,TValue> collection with the lowest available index. - - The key of the entry to add. - The value of the entry to add. - The index of the newly added entry - - You can also use the property to add new elements by setting the value of a key that does not exist in the IOrderedDictionary<TKey,TValue> collection; however, if the specified key already exists in the IOrderedDictionary<TKey,TValue>, setting the property overwrites the old value. In contrast, the method does not modify existing elements. - An element with the same key already exists in the IOrderedDictionary<TKey,TValue> - The IOrderedDictionary<TKey,TValue> is read-only.
- -or-
- The IOrderedDictionary<TKey,TValue> has a fixed size.
-
- - - Inserts a new entry into the IOrderedDictionary<TKey,TValue> collection with the specified key and value at the specified index. - - The zero-based index at which the element should be inserted. - The key of the entry to add. - The value of the entry to add. The value can be if the type of the values in the dictionary is a reference type. - is less than 0.
- -or-
- is greater than .
- An element with the same key already exists in the IOrderedDictionary<TKey,TValue>. - The IOrderedDictionary<TKey,TValue> is read-only.
- -or-
- The IOrderedDictionary<TKey,TValue> has a fixed size.
-
- - - Gets or sets the value at the specified index. - - The zero-based index of the value to get or set. - The value of the item at the specified index. - is less than 0.
- -or-
- is equal to or greater than .
-
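A small sketch of the ordering semantics the IOrderedDictionary<TKey,TValue> contract above describes: Add appends at the lowest available index, Insert places an entry at an explicit index, and entries can be addressed either by key or by position. The concrete implementation is bundled with the client library as a support type, so treat this purely as an illustration of the documented behaviour rather than a supported public API:

    var dict = new OrderedDictionary<string, int>();

    dict.Add("first", 1);           // appended at the lowest available index (0)
    dict.Add("third", 3);           // index 1
    dict.Insert(1, "second", 2);    // inserted at index 1, shifting "third" to index 2

    int byKey   = dict["second"];   // lookup by key      -> 2
    int byIndex = dict[2];          // lookup by position -> 3 (the value stored for "third")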
- - - Represents a generic collection of key/value pairs that are ordered independently of the key and value. - - The type of the keys in the dictionary - The type of the values in the dictionary - - - - Initializes a new instance of the OrderedDictionary<TKey,TValue> class. - - - - - Initializes a new instance of the OrderedDictionary<TKey,TValue> class using the specified initial capacity. - - The initial number of elements that the OrderedDictionary<TKey,TValue> can contain. - is less than 0 - - - - Initializes a new instance of the OrderedDictionary<TKey,TValue> class using the specified comparer. - - The IEqualityComparer<TKey> to use when comparing keys, or to use the default EqualityComparer<TKey> for the type of the key. - - - - Initializes a new instance of the OrderedDictionary<TKey,TValue> class using the specified initial capacity and comparer. - - The initial number of elements that the OrderedDictionary<TKey,TValue> collection can contain. - The IEqualityComparer<TKey> to use when comparing keys, or to use the default EqualityComparer<TKey> for the type of the key. - is less than 0 - - - - Converts the object passed as a key to the key type of the dictionary - - The key object to check - The key object, cast as the key type of the dictionary - is . - The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . - - - - Converts the object passed as a value to the value type of the dictionary - - The object to convert to the value type of the dictionary - The value object, converted to the value type of the dictionary - is , and the value type of the OrderedDictionary<TKey,TValue> is a value type. - The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . - - - - Gets the dictionary object that stores the keys and values - - The dictionary object that stores the keys and values for the OrderedDictionary<TKey,TValue> - Accessing this property will create the dictionary object if necessary - - - - Gets the list object that stores the key/value pairs. - - The list object that stores the key/value pairs for the OrderedDictionary<TKey,TValue> - Accessing this property will create the list object if necessary. - - - - Inserts a new entry into the OrderedDictionary<TKey,TValue> collection with the specified key and value at the specified index. - - The zero-based index at which the element should be inserted. - The key of the entry to add. - The value of the entry to add. The value can be if the type of the values in the dictionary is a reference type. - is less than 0.
- -or-
- is greater than .
- is . - An element with the same key already exists in the OrderedDictionary<TKey,TValue>. -
- - - Inserts a new entry into the OrderedDictionary<TKey,TValue> collection with the specified key and value at the specified index. - - The zero-based index at which the element should be inserted. - The key of the entry to add. - The value of the entry to add. The value can be if the type of the values in the dictionary is a reference type. - is less than 0.
- -or-
- is greater than .
- is .
- -or-
- is , and the value type of the OrderedDictionary<TKey,TValue> is a value type.
- The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
- -or-
- The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
- -or-
- An element with the same key already exists in the OrderedDictionary<TKey,TValue>.
-
- - - Removes the entry at the specified index from the OrderedDictionary<TKey,TValue> collection. - - The zero-based index of the entry to remove. - is less than 0.
- -or-
- index is equal to or greater than .
-
- - - Gets or sets the value at the specified index. - - The zero-based index of the value to get or set. - The value of the item at the specified index. - is less than 0.
- -or-
- index is equal to or greater than .
-
- - - Gets or sets the value at the specified index. - - The zero-based index of the value to get or set. - The value of the item at the specified index. - is less than 0.
- -or-
- index is equal to or greater than .
- is a null reference, and the value type of the OrderedDictionary<TKey,TValue> is a value type. - The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . -
- - - Adds an entry with the specified key and value into the OrderedDictionary<TKey,TValue> collection with the lowest available index. - - The key of the entry to add. - The value of the entry to add. This value can be . - A key cannot be , but a value can be. - You can also use the property to add new elements by setting the value of a key that does not exist in the OrderedDictionary<TKey,TValue> collection; however, if the specified key already exists in the OrderedDictionary<TKey,TValue>, setting the property overwrites the old value. In contrast, the method does not modify existing elements. - is - An element with the same key already exists in the OrderedDictionary<TKey,TValue> - - - - Adds an entry with the specified key and value into the OrderedDictionary<TKey,TValue> collection with the lowest available index. - - The key of the entry to add. - The value of the entry to add. This value can be . - The index of the newly added entry - A key cannot be , but a value can be. - You can also use the property to add new elements by setting the value of a key that does not exist in the OrderedDictionary<TKey,TValue> collection; however, if the specified key already exists in the OrderedDictionary<TKey,TValue>, setting the property overwrites the old value. In contrast, the method does not modify existing elements. - is - An element with the same key already exists in the OrderedDictionary<TKey,TValue> - - - - Adds an entry with the specified key and value into the OrderedDictionary<TKey,TValue> collection with the lowest available index. - - The key of the entry to add. - The value of the entry to add. This value can be . - is .
- -or-
- is , and the value type of the OrderedDictionary<TKey,TValue> is a value type.
- The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
- -or-
- The value type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of .
-
- - - Removes all elements from the OrderedDictionary<TKey,TValue> collection. - - The capacity is not changed as a result of calling this method. - - - - Determines whether the OrderedDictionary<TKey,TValue> collection contains a specific key. - - The key to locate in the OrderedDictionary<TKey,TValue> collection. - if the OrderedDictionary<TKey,TValue> collection contains an element with the specified key; otherwise, . - is - - - - Determines whether the OrderedDictionary<TKey,TValue> collection contains a specific key. - - The key to locate in the OrderedDictionary<TKey,TValue> collection. - if the OrderedDictionary<TKey,TValue> collection contains an element with the specified key; otherwise, . - is - The key type of the OrderedDictionary<TKey,TValue> is not in the inheritance hierarchy of . - - - - Gets a value indicating whether the OrderedDictionary<TKey,TValue> has a fixed size. - - if the OrderedDictionary<TKey,TValue> has a fixed size; otherwise, . The default is . - - - - Gets a value indicating whether the OrderedDictionary<TKey,TValue> collection is read-only. - - if the OrderedDictionary<TKey,TValue> is read-only; otherwise, . The default is . - - A collection that is read-only does not allow the addition, removal, or modification of elements after the collection is created. - A collection that is read-only is simply a collection with a wrapper that prevents modification of the collection; therefore, if changes are made to the underlying collection, the read-only collection reflects those changes. - - - - - Gets an object containing the keys in the OrderedDictionary<TKey,TValue>. - - An object containing the keys in the OrderedDictionary<TKey,TValue>. - The returned object is not a static copy; instead, the collection refers back to the keys in the original OrderedDictionary<TKey,TValue>. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the key collection. - - - - Returns the zero-based index of the specified key in the OrderedDictionary<TKey,TValue> - - The key to locate in the OrderedDictionary<TKey,TValue> - The zero-based index of , if is found in the OrderedDictionary<TKey,TValue>; otherwise, -1 - This method performs a linear search; therefore it has a cost of O(n) at worst. - - - - Removes the entry with the specified key from the OrderedDictionary<TKey,TValue> collection. - - The key of the entry to remove - if the key was found and the corresponding element was removed; otherwise, - - - - Removes the entry with the specified key from the OrderedDictionary<TKey,TValue> collection. - - The key of the entry to remove - - - - Gets an object containing the values in the OrderedDictionary<TKey,TValue> collection. - - An object containing the values in the OrderedDictionary<TKey,TValue> collection. - The returned object is not a static copy; instead, the refers back to the values in the original OrderedDictionary<TKey,TValue> collection. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the . - - - - Gets or sets the value with the specified key. - - The key of the value to get or set. - The value associated with the specified key. If the specified key is not found, attempting to get it returns , and attempting to set it creates a new element using the specified key. - - - - Gets or sets the value with the specified key. - - The key of the value to get or set. - The value associated with the specified key. 
If the specified key is not found, attempting to get it returns , and attempting to set it creates a new element using the specified key. - - - - Copies the elements of the OrderedDictionary<TKey,TValue> elements to a one-dimensional Array object at the specified index. - - The one-dimensional object that is the destination of the objects copied from the OrderedDictionary<TKey,TValue>. The must have zero-based indexing. - The zero-based index in at which copying begins. - The method preserves the order of the elements in the OrderedDictionary<TKey,TValue> - - - - Gets the number of key/values pairs contained in the OrderedDictionary<TKey,TValue> collection. - - The number of key/value pairs contained in the OrderedDictionary<TKey,TValue> collection. - - - - Gets a value indicating whether access to the OrderedDictionary<TKey,TValue> object is synchronized (thread-safe). - - This method always returns false. - - - - Gets an object that can be used to synchronize access to the OrderedDictionary<TKey,TValue> object. - - An object that can be used to synchronize access to the OrderedDictionary<TKey,TValue> object. - - - - Gets an ICollection<TKey> object containing the keys in the OrderedDictionary<TKey,TValue>. - - An ICollection<TKey> object containing the keys in the OrderedDictionary<TKey,TValue>. - The returned ICollection<TKey> object is not a static copy; instead, the collection refers back to the keys in the original OrderedDictionary<TKey,TValue>. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the key collection. - - - - Gets the value associated with the specified key. - - The key of the value to get. - When this method returns, contains the value associated with the specified key, if the key is found; otherwise, the default value for the type of . This parameter can be passed uninitialized. - if the OrderedDictionary<TKey,TValue> contains an element with the specified key; otherwise, . - - - - Gets an ICollection<TValue> object containing the values in the OrderedDictionary<TKey,TValue>. - - An ICollection<TValue> object containing the values in the OrderedDictionary<TKey,TValue>. - The returned ICollection<TKey> object is not a static copy; instead, the collection refers back to the values in the original OrderedDictionary<TKey,TValue>. Therefore, changes to the OrderedDictionary<TKey,TValue> continue to be reflected in the value collection. - - - - Adds the specified value to the OrderedDictionary<TKey,TValue> with the specified key. - - The KeyValuePair<TKey,TValue> structure representing the key and value to add to the OrderedDictionary<TKey,TValue>. - - - - Determines whether the OrderedDictionary<TKey,TValue> contains a specific key and value. - - The KeyValuePair<TKey,TValue> structure to locate in the OrderedDictionary<TKey,TValue>. - if is found in the OrderedDictionary<TKey,TValue>; otherwise, . - - - - Copies the elements of the OrderedDictionary<TKey,TValue> to an array of type , starting at the specified index. - - The one-dimensional array of type KeyValuePair<TKey,TValue> that is the destination of the KeyValuePair<TKey,TValue> elements copied from the OrderedDictionary<TKey,TValue>. The array must have zero-based indexing. - The zero-based index in at which copying begins. - - - - Removes a key and value from the dictionary. - - The KeyValuePair<TKey,TValue> structure representing the key and value to remove from the OrderedDictionary<TKey,TValue>. 
- if the key and value represented by is successfully found and removed; otherwise, . This method returns if is not found in the OrderedDictionary<TKey,TValue>. - - - - serialize/deserialize arbitrary objects - (objects must be serializable) - - - - - Serialize object to buffer - - serializable object - - - - - Deserialize buffer to object - - byte array to deserialize - - - - - manages a "region" in the redis key space - namespace can be cleared by incrementing the generation - - - - - get locking strategy - - - - - get current generation - - - - - - set new generation - - - - - - redis key for generation - - - - - - get redis key that holds all namespace keys - - - - - - get global cache key - - - - - - - get global key inside of this namespace - - - prefixes can be added for name deconfliction - - - - - replace UniqueCharacter with its double, to avoid name clash - - - - - - - - - - - - - - General purpose pipeline - - - - - - Flush send buffer, and read responses - - - - - Redis operation (transaction/pipeline) that allows queued commands to be completed - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - - - - - Redis-specific exception. Thrown if unable to connect to Redis server due to socket exception, for example. - - - - - Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). - - - - - Put "QUEUED" messages at back of queue - - - - - - Issue exec command (not queued) - - - - - callback for after result count is read in - - - - - - Wrap the common redis set operations under a ICollection[string] interface. - - - - - This class contains all the common operations for the RedisClient. - The client contains a 1:1 mapping of c# methods to redis operations of the same name. - - Not threadsafe use a pooled manager - - - - - Used to manage connection pooling - - - - - Gets or sets object key prefix. - - - - - Requires custom result parsing - - Number of results - - - - Command to set multuple binary safe arguments - - - - - - - Send command outside of managed Write Buffer - - - - - - reset buffer index in send buffer - - - - - Wrap the common redis list operations under a IList[string] interface. - - - - - Provides thread-safe pooling of redis client connections. - Allows load-balancing of master-write and read-slave hosts, ideal for - 1 master and multiple replicated read slaves. - - - - - Gets or sets object key prefix. - - - - - Hosts can be an IP Address or Hostname in the format: host[:port] - e.g. 127.0.0.1:6379 - default is: localhost:6379 - - The write hosts. - The read hosts. - The config. - - - - Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts - - - - - - Called within a lock - - - - - - Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. - - - - - - Called within a lock - - - - - - Disposes the read only client. - - The client. - - - - Disposes the write client. - - The client. - - - - Manage a client acquired from the PooledRedisClientManager - Dispose method will release the client back to the pool. - - - - - wrap the acquired client - - - - - - access the wrapped client - - - - - release the wrapped client back to the pool - - -
-
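The client-side pieces documented in this file (pooled client manager, POCO-typed client, and MULTI/EXEC transactions) fit together roughly as follows. A minimal sketch reusing the illustrative Person POCO from the OrmLite example earlier; hosts and key names are assumptions:

    using ServiceStack.Redis;

    var redisManager = new PooledRedisClientManager("localhost:6379");

    using (var redis = redisManager.GetClient())        // client is returned to the pool on Dispose
    {
        var people = redis.As<Person>();                // POCO-typed client
        people.Store(new Person { Id = 1, FirstName = "Jimi" });
        var jimi = people.GetById(1);

        using (var trans = redis.CreateTransaction())   // MULTI ... EXEC
        {
            trans.QueueCommand(r => r.IncrementValue("person:views"));
            trans.QueueCommand(r => r.AddItemToList("person:log", "viewed person 1"));
            trans.Commit();                             // EXEC; disposing without Commit discards the queue
        }
    }

When read replicas are configured, GetReadOnlyClient() hands back a client bound to the ReadOnlyHosts list in the same way.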
diff --git a/lib/tests/ServiceStack.Server.dll b/lib/tests/ServiceStack.Server.dll deleted file mode 100644 index 77ebb7c9..00000000 Binary files a/lib/tests/ServiceStack.Server.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Server.xml b/lib/tests/ServiceStack.Server.xml deleted file mode 100644 index b5c0f4c4..00000000 --- a/lib/tests/ServiceStack.Server.xml +++ /dev/null @@ -1,345 +0,0 @@ - - - - ServiceStack.Server - - - - - Creates a Redis MQ Server that processes each message on its own background thread. - i.e. if you register 3 handlers it will create 7 background threads: - - 1 listening to the Redis MQ Subscription, getting notified of each new message - - 3x1 Normal InQ for each message handler - - 3x1 PriorityQ for each message handler (Turn off with DisablePriorityQueues) - - When RedisMqServer Starts it creates a background thread subscribed to the Redis MQ Topic that - listens for new incoming messages. It also starts 2 background threads for each message type: - - 1 for processing the services Priority Queue and 1 processing the services normal Inbox Queue. - - Priority Queue's can be enabled on a message-per-message basis by specifying types in the - OnlyEnablePriortyQueuesForTypes property. The DisableAllPriorityQueues property disables all Queues. - - The Start/Stop methods are idempotent i.e. It's safe to call them repeatedly on multiple threads - and the Redis MQ Server will only have Started or Stopped once. - - - - - Execute global transformation or custom logic before a request is processed. - Must be thread-safe. - - - - - Execute global transformation or custom logic on the response. - Must be thread-safe. - - - - - Execute global error handler logic. Must be thread-safe. - - - - - If you only want to enable priority queue handlers (and threads) for specific msg types - - - - - Don't listen on any Priority Queues - - - - - Opt-in to only publish responses on this white list. - Publishes all responses by default. - - - - - Transient message queues are a one-pass message queue service that starts - processing messages when Start() is called. Any subsequent Start() calls - while the service is running is ignored. - - The transient service will continue to run until all messages have been - processed after which time it will shutdown all processing until Start() is called again. - - - - - Base rcon class. - - - - - Event delegate when disconnected from the server. - - - - - - Delegate for async callbacks. - - - - - - - Disconnected event. - - - - - Rcon connection socket. Always set to null when not connected. - - - - - Unique ID for each message. - - - - - Registered callbacks. - - - - - Create a new instance of rcon. - - Endpoint to connect to, usually the game server with query port. - - - - Attempts to connect to the game server for rcon operations. - - True if connection established, false otherwise. - - - - Processes a received packet. - - The packet. - - - - Disconnects from rcon. - - - - - Sends message to the server. - - Words to send. - - - - Game server endpoint. - - - - - Last exception that occured during operation. - - - - - Connected? - - - - - Gets the next unique ID to be used for transmisson. Read this before sending to pair responses to sent messages. - - - - - Exception thrown when attempting to send on a non-connected service client. - - - - - True if the packet originated on the server. - - - - - True if the packet is a response from a sent packet. - - - - - Sequence identifier. Unique to the connection. - - - - - Words. 
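The RedisMqServer described above (one subscription thread plus an InQ and a PriorityQ worker per registered handler) is typically wired up as below. A minimal sketch; the Hello DTO is illustrative and the messaging namespaces may differ slightly between ServiceStack versions:

    using System;
    using ServiceStack.Messaging;        // IMessage<T>
    using ServiceStack.Redis;

    var redisManager = new PooledRedisClientManager("localhost:6379");
    var mqServer = new RedisMqServer(redisManager, retryCount: 2);

    mqServer.RegisterHandler<Hello>(m =>
    {
        Console.WriteLine("Hello, " + m.GetBody().Name);
        return null;                     // nothing published to the response queue
    });

    mqServer.Start();                    // idempotent: repeated calls only start it once

    using (var mqClient = mqServer.CreateMessageQueueClient())
    {
        mqClient.Publish(new Hello { Name = "World" });
    }

    public class Hello
    {
        public string Name { get; set; }
    }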
- - - - - Contains methods required for encoding and decoding rcon packets. - - - - - Decodes a packet. - - The packet. - A packet object. - - - - Decodes the packet header. - - - - - - - Decodes words in a packet. - - - - - - - Encodes a packet for transmission to the server. - - - - - - - - - - Encodes a packet header. - - - - - - - - - Encodes words. - - - - - - - Processing client used to interface with ServiceStack and allow a message to be processed. - Not an actual client. - - - - - Publish the specified message into the durable queue @queueName - - - - - Publish the specified message into the transient queue @queueName - - - - - Synchronous blocking get. - - - - - Non blocking get message - - - - - Hosting services via a binary-safe TCP-based protocol. - - - - - Factory to create consumers and producers that work with this service - - - - - Register DTOs and hanlders the MQ Host will process - - - - - - - Get Total Current Stats for all Message Handlers - - - - - - Get a Stats dump - - - - - - Start the MQ Host. Stops the server and restarts if already started. - - - - - Stop the MQ Host if not already stopped. - - - - - Processes a received packet. - - The packet. - - - - Understands how to store a to a MSSQL database. - - - - - Returns a new . - - - - - Stores to dbo.MiniProfilers under its ; - stores all child Timings and SqlTimings to their respective tables. - - - - - Loads the MiniProfiler identifed by 'id' from the database. - - - - - Returns a list of s that haven't been seen by . - - User identified by the current . - - - - Returns a connection to Sql Server. - - - - - Creates needed tables. Run this once on your database. - - - Works in sql server and sqlite (with documented removals). - - - - diff --git a/lib/tests/ServiceStack.Text.dll b/lib/tests/ServiceStack.Text.dll deleted file mode 100644 index ff020f23..00000000 Binary files a/lib/tests/ServiceStack.Text.dll and /dev/null differ diff --git a/lib/tests/ServiceStack.Text.xml b/lib/tests/ServiceStack.Text.xml deleted file mode 100644 index a0267c88..00000000 --- a/lib/tests/ServiceStack.Text.xml +++ /dev/null @@ -1,1400 +0,0 @@ - - - - ServiceStack.Text - - - - - Utils to load types - - - - - Find the type from the name supplied - - [typeName] or [typeName, assemblyName] - - - - - The top-most interface of the given type, if any. - - - - - Find type if it exists - - - - The type if it exists - - - - If AlwaysUseUtc is set to true then convert all DateTime to UTC. If PreserveUtc is set to true then UTC dates will not convert to local - - - - - - - Repairs an out-of-spec XML date/time string which incorrectly uses a space instead of a 'T' to separate the date from the time. - These string are occasionally generated by SQLite and can cause errors in OrmLite when reading these columns from the DB. - - The XML date/time string to repair - The repaired string. If no repairs were made, the original string is returned. - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - - WCF Json format: /Date(unixts+0000)/ - - - - - - - Get the type(string) constructor if exists - - The type. - - - - - micro optimizations: using flags instead of value.IndexOfAny(EscapeChars) - - - - - - - Class to hold - - - - - - A fast, standards-based, serialization-issue free DateTime serailizer. - - - - - Determines whether this serializer can create the specified type from a string. - - The type. - - true if this instance [can create from string] the specified type; otherwise, false. - - - - - Parses the specified value. - - The value. 
- - - - - Deserializes from reader. - - The reader. - - - - - Serializes to string. - - The value. - - - - - Serializes to writer. - - The value. - The writer. - - - - Sets which format to use when serializing TimeSpans - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. - - - - - if the is configured - to support web-friendly serialized formats, ie emitting lowercase_underscore_casing for JSON - - - - - Define how property names are mapped during deserialization - - - - - Gets or sets a value indicating if the framework should throw serialization exceptions - or continue regardless of deserialization errors. If the framework - will throw; otherwise, it will parse as many fields as possible. The default is . - - - - - Gets or sets a value indicating if the framework should always convert to UTC format instead of local time. - - - - - Gets or sets a value indicating if the framework should skip automatic conversions. - Dates will be handled literally, any included timezone encoding will be lost and the date will be treaded as DateTimeKind.Local - Utc formatted input will result in DateTimeKind.Utc output. Any input without TZ data will be set DateTimeKind.Unspecified - This will take precedence over other flags like AlwaysUseUtc - JsConfig.DateHandler = DateHandler.ISO8601 should be used when set true for consistent de/serialization. - - - - - Gets or sets a value indicating if the framework should always assume is in UTC format if Kind is Unspecified. - - - - - Gets or sets whether we should append the Utc offset when we serialize Utc dates. Defaults to no. - Only supported for when the JsConfig.DateHandler == JsonDateHandler.TimestampOffset - - - - - Gets or sets a value indicating if unicode symbols should be serialized as "\uXXXX". - - - - - Gets or sets a value indicating if the framework should call an error handler when - an exception happens during the deserialization. - - Parameters have following meaning in order: deserialized entity, property name, parsed value, property type, caught exception. - - - - If set to true, Interface types will be prefered over concrete types when serializing. - - - - - If set to true, Interface types will be prefered over concrete types when serializing. - - - - - Sets the maximum depth to avoid circular dependencies - - - - - Set this to enable your own type construction provider. - This is helpful for integration with IoC containers where you need to call the container constructor. - Return null if you don't know how to construct the type and the parameterless constructor will be used. - - - - - Always emit type info for this type. Takes precedence over ExcludeTypeInfo - - - - - Never emit type info for this type - - - - - if the is configured - to take advantage of specification, - to support user-friendly serialized formats, ie emitting camelCasing for JSON - and parsing member names and enum values in a case-insensitive manner. 
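The global JsConfig switches documented above (UTC handling, type info, casing and error behaviour) are simple static properties that affect every subsequent serialization call. A minimal sketch; the Event DTO is illustrative:

    using System;
    using ServiceStack;          // ToJson()/FromJson<T>() extension methods
    using ServiceStack.Text;     // JsConfig

    JsConfig.AlwaysUseUtc = true;        // serialize all DateTimes as UTC
    JsConfig.AssumeUtc = true;           // treat DateTimeKind.Unspecified input as UTC
    JsConfig.ExcludeTypeInfo = true;     // never emit __type information

    var dto = new Event { Name = "deploy", At = DateTime.UtcNow };
    string json = dto.ToJson();
    var roundTripped = json.FromJson<Event>();

    public class Event
    {
        public string Name { get; set; }
        public DateTime At { get; set; }
    }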
- - - - - Define custom serialization fn for BCL Structs - - - - - Opt-in flag to set some Value Types to be treated as a Ref Type - - - - - Whether there is a fn (raw or otherwise) - - - - - Define custom raw serialization fn - - - - - Define custom serialization hook - - - - - Define custom after serialization hook - - - - - Define custom deserialization fn for BCL Structs - - - - - Define custom raw deserialization fn for objects - - - - - Exclude specific properties of this type from being serialized - - - - - The property names on target types must match property names in the JSON source - - - - - The property names on target types may not match the property names in the JSON source - - - - - Uses the xsd format like PT15H10M20S - - - - - Uses the standard .net ToString method of the TimeSpan class - - - - - Get JSON string value converted to T - - - - - Get JSON string value - - - - - Get JSON string value - - - - - Get unescaped string value - - - - - Get unescaped string value - - - - - Write JSON Array, Object, bool or number values as raw string - - - - - Creates an instance of a Type from a string value - - - - - Parses the specified value. - - The value. - - - - - Shortcut escape when we're sure value doesn't contain any escaped chars - - - - - - - Given a character as utf32, returns the equivalent string provided that the character - is legal json. - - - - - - - Micro-optimization keep pre-built char arrays saving a .ToCharArray() + function call (see .net implementation of .Write(string)) - - - - - Searches the string for one or more non-printable characters. - - The string to search. - True if there are any characters that require escaping. False if the value can be written verbatim. - - Micro optimizations: since quote and backslash are the only printable characters requiring escaping, removed previous optimization - (using flags instead of value.IndexOfAny(EscapeChars)) in favor of two equality operations saving both memory and CPU time. - Also slightly reduced code size by re-arranging conditions. - TODO: Possible Linq-only solution requires profiling: return value.Any(c => !c.IsPrintable() || c == QuoteChar || c == EscapeChar); - - - - - Implement the serializer using a more static approach - - - - - - Implement the serializer using a more static approach - - - - - - Pretty Thread-Safe cache class from: - http://code.google.com/p/dapper-dot-net/source/browse/Dapper/SqlMapper.cs - - This is a micro-cache; suitable when the number of terms is controllable (a few hundred, for example), - and strictly append-only; you cannot change existing values. All key matches are on **REFERENCE** - equality. The type is fully thread-safe. - - - - - Represents an individual object, allowing access to members by-name - - - - - Get or Set the value of a named member for the underlying object - - - - - The object represented by this instance - - - - - Use the target types definition of equality - - - - - Obtain the hash of the target object - - - - - Use the target's definition of a string representation - - - - - Wraps an individual object, allowing by-name access to that instance - - - - - Provides by-name member-access to objects of a given type - - - - - Does this type support new instances via a parameterless constructor? 
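The per-type hooks documented above (SerializeFn, RawSerializeFn, DeSerializeFn and friends) let a single type opt out of the default format. A minimal sketch that pins DateTime to a date-only round-trip format, purely as an illustration:

    using System;
    using System.Globalization;
    using ServiceStack.Text;

    JsConfig<DateTime>.SerializeFn = date =>
        date.ToString("yyyy-MM-dd", CultureInfo.InvariantCulture);

    JsConfig<DateTime>.DeSerializeFn = text =>
        DateTime.ParseExact(text, "yyyy-MM-dd", CultureInfo.InvariantCulture);

The raw variants described above work the same way but deal in raw JSON fragments rather than quoted string values.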
- - - - - Create a new instance of this type - - - - - Provides a type-specific accessor, allowing by-name access for all objects of that type - - The accessor is cached internally; a pre-existing accessor may be returned - - - - Get or set the value of a named member on the target instance - - - - - Generic implementation of object pooling pattern with predefined pool size limit. The main - purpose is that limited number of frequently used objects can be kept in the pool for - further recycling. - - Notes: - 1) it is not the goal to keep all returned objects. Pool is not meant for storage. If there - is no space in the pool, extra returned objects will be dropped. - - 2) it is implied that if object was obtained from a pool, the caller will return it back in - a relatively short time. Keeping checked out objects for long durations is ok, but - reduces usefulness of pooling. Just new up your own. - - Not returning objects to the pool in not detrimental to the pool's work, but is a bad practice. - Rationale: - If there is no intent for reusing the object, do not use pool - just use "new". - - - - - Not using System.Func{T} because this file is linked into the (debugger) Formatter, - which does not have that type (since it compiles against .NET 2.0). - - - - - Produces an instance. - - - Search strategy is a simple linear probing which is chosen for it cache-friendliness. - Note that Free will try to store recycled objects close to the start thus statistically - reducing how far we will typically search. - - - - - Returns objects to the pool. - - - Search strategy is a simple linear probing which is chosen for it cache-friendliness. - Note that Free will try to store recycled objects close to the start thus statistically - reducing how far we will typically search in Allocate. - - - - - Removes an object from leak tracking. - - This is called when an object is returned to the pool. It may also be explicitly - called if an object allocated from the pool is intentionally not being returned - to the pool. This can be of use with pooled arrays if the consumer wants to - return a larger array to the pool than was originally allocated. - - - - - this is RAII object to automatically release pooled object when its owning pool - - - - - Shared object pool for roslyn - - Use this shared pool if only concern is reducing object allocations. - if perf of an object pool itself is also a concern, use ObjectPool directly. - - For example, if you want to create a million of small objects within a second, - use the ObjectPool directly. it should have much less overhead than using this. - - - - - pool that uses default constructor with 100 elements pooled - - - - - pool that uses default constructor with 20 elements pooled - - - - - pool that uses string as key with StringComparer.OrdinalIgnoreCase as key comparer - - - - - pool that uses string as element with StringComparer.OrdinalIgnoreCase as element comparer - - - - - pool that uses string as element with StringComparer.Ordinal as element comparer - - - - - Used to reduce the # of temporary byte[]s created to satisfy serialization and - other I/O requests - - - - pooled memory : 4K * 512 = 4MB - - - - Reusable StringBuilder ThreadStatic Cache - - - - - Alternative Reusable StringBuilder ThreadStatic Cache - - - - - Reusable StringWriter ThreadStatic Cache - - - - - Alternative Reusable StringWriter ThreadStatic Cache - - - - - Manages pools of RecyclableMemoryStream objects. - - - There are two pools managed in here. 
The small pool contains same-sized buffers that are handed to streams - as they write more data. - - For scenarios that need to call GetBuffer(), the large pool contains buffers of various sizes, all - multiples of LargeBufferMultiple (1 MB by default). They are split by size to avoid overly-wasteful buffer - usage. There should be far fewer 8 MB buffers than 1 MB buffers, for example. - - - - - Generic delegate for handling events without any arguments. - - - - - Delegate for handling large buffer discard reports. - - Reason the buffer was discarded. - - - - Delegate for handling reports of stream size when streams are allocated - - Bytes allocated. - - - - Delegate for handling periodic reporting of memory use statistics. - - Bytes currently in use in the small pool. - Bytes currently free in the small pool. - Bytes currently in use in the large pool. - Bytes currently free in the large pool. - - - - pools[0] = 1x largeBufferMultiple buffers - pools[1] = 2x largeBufferMultiple buffers - etc., up to maximumBufferSize - - - - - Initializes the memory manager with the default block/buffer specifications. - - - - - Initializes the memory manager with the given block requiredSize. - - Size of each block that is pooled. Must be > 0. - Each large buffer will be a multiple of this value. - Buffers larger than this are not pooled - blockSize is not a positive number, or largeBufferMultiple is not a positive number, or maximumBufferSize is less than blockSize. - maximumBufferSize is not a multiple of largeBufferMultiple - - - - The size of each block. It must be set at creation and cannot be changed. - - - - - All buffers are multiples of this number. It must be set at creation and cannot be changed. - - - - - Gets or sets the maximum buffer size. - - Any buffer that is returned to the pool that is larger than this will be - discarded and garbage collected. - - - - Number of bytes in small pool not currently in use - - - - - Number of bytes currently in use by stream from the small pool - - - - - Number of bytes in large pool not currently in use - - - - - Number of bytes currently in use by streams from the large pool - - - - - How many blocks are in the small pool - - - - - How many buffers are in the large pool - - - - - How many bytes of small free blocks to allow before we start dropping - those returned to us. - - - - - How many bytes of large free buffers to allow before we start dropping - those returned to us. - - - - - Maximum stream capacity in bytes. Attempts to set a larger capacity will - result in an exception. - - A value of 0 indicates no limit. - - - - Whether to save callstacks for stream allocations. This can help in debugging. - It should NEVER be turned on generally in production. - - - - - Whether dirty buffers can be immediately returned to the buffer pool. E.g. when GetBuffer() is called on - a stream and creates a single large buffer, if this setting is enabled, the other blocks will be returned - to the buffer pool immediately. - Note when enabling this setting that the user is responsible for ensuring that any buffer previously - retrieved from a stream which is subsequently modified is not used after modification (as it may no longer - be valid). - - - - - Removes and returns a single block from the pool. - - A byte[] array - - - - Returns a buffer of arbitrary size from the large buffer pool. This buffer - will be at least the requiredSize and always be a multiple of largeBufferMultiple. 
- - The minimum length of the buffer - The tag of the stream returning this buffer, for logging if necessary. - A buffer of at least the required size. - - - - Returns the buffer to the large pool - - The buffer to return. - The tag of the stream returning this buffer, for logging if necessary. - buffer is null - buffer.Length is not a multiple of LargeBufferMultiple (it did not originate from this pool) - - - - Returns the blocks to the pool - - Collection of blocks to return to the pool - The tag of the stream returning these blocks, for logging if necessary. - blocks is null - blocks contains buffers that are the wrong size (or null) for this memory manager - - - - Retrieve a new MemoryStream object with no tag and a default initial capacity. - - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and a default initial capacity. - - A tag which can be used to track the source of the stream. - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and at least the given capacity. - - A tag which can be used to track the source of the stream. - The minimum desired capacity for the stream. - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and at least the given capacity, possibly using - a single continugous underlying buffer. - - Retrieving a MemoryStream which provides a single contiguous buffer can be useful in situations - where the initial size is known and it is desirable to avoid copying data between the smaller underlying - buffers to a single large one. This is most helpful when you know that you will always call GetBuffer - on the underlying stream. - A tag which can be used to track the source of the stream. - The minimum desired capacity for the stream. - Whether to attempt to use a single contiguous buffer. - A MemoryStream. - - - - Retrieve a new MemoryStream object with the given tag and with contents copied from the provided - buffer. The provided buffer is not wrapped or used after construction. - - The new stream's position is set to the beginning of the stream when returned. - A tag which can be used to track the source of the stream. - The byte buffer to copy data from. - The offset from the start of the buffer to copy from. - The number of bytes to copy from the buffer. - A MemoryStream. - - - - Triggered when a new block is created. - - - - - Triggered when a new block is created. - - - - - Triggered when a new large buffer is created. - - - - - Triggered when a new stream is created. - - - - - Triggered when a stream is disposed. - - - - - Triggered when a stream is finalized. - - - - - Triggered when a stream is finalized. - - - - - Triggered when a user converts a stream to array. - - - - - Triggered when a large buffer is discarded, along with the reason for the discard. - - - - - Periodically triggered to report usage statistics. - - - - - MemoryStream implementation that deals with pooling and managing memory streams which use potentially large - buffers. - - - This class works in tandem with the RecylableMemoryStreamManager to supply MemoryStream - objects to callers, while avoiding these specific problems: - 1. LOH allocations - since all large buffers are pooled, they will never incur a Gen2 GC - 2. Memory waste - A standard memory stream doubles its size when it runs out of room. This - leads to continual memory growth as each stream approaches the maximum allowed size. - 3. Memory copying - Each time a MemoryStream grows, all the bytes are copied into new buffers. 
- This implementation only copies the bytes when GetBuffer is called. - 4. Memory fragmentation - By using homogeneous buffer sizes, it ensures that blocks of memory - can be easily reused. - - The stream is implemented on top of a series of uniformly-sized blocks. As the stream's length grows, - additional blocks are retrieved from the memory manager. It is these blocks that are pooled, not the stream - object itself. - - The biggest wrinkle in this implementation is when GetBuffer() is called. This requires a single - contiguous buffer. If only a single block is in use, then that block is returned. If multiple blocks - are in use, we retrieve a larger buffer from the memory manager. These large buffers are also pooled, - split by size--they are multiples of a chunk size (1 MB by default). - - Once a large buffer is assigned to the stream the blocks are NEVER again used for this stream. All operations take place on the - large buffer. The large buffer can be replaced by a larger buffer from the pool as needed. All blocks and large buffers - are maintained in the stream until the stream is disposed (unless AggressiveBufferReturn is enabled in the stream manager). - - - - - - All of these blocks must be the same size - - - - - This is only set by GetBuffer() if the necessary buffer is larger than a single block size, or on - construction if the caller immediately requests a single large buffer. - - If this field is non-null, it contains the concatenation of the bytes found in the individual - blocks. Once it is created, this (or a larger) largeBuffer will be used for the life of the stream. - - - - - This list is used to store buffers once they're replaced by something larger. - This is for the cases where you have users of this class that may hold onto the buffers longer - than they should and you want to prevent race conditions which could corrupt the data. - - - - - Unique identifier for this stream across it's entire lifetime - - Object has been disposed - - - - A temporary identifier for the current usage of this stream. - - Object has been disposed - - - - Gets the memory manager being used by this stream. - - Object has been disposed - - - - Callstack of the constructor. It is only set if MemoryManager.GenerateCallStacks is true, - which should only be in debugging situations. - - - - - Callstack of the Dispose call. It is only set if MemoryManager.GenerateCallStacks is true, - which should only be in debugging situations. - - - - - This buffer exists so that WriteByte can forward all of its calls to Write - without creating a new byte[] buffer on every call. - - - - - Allocate a new RecyclableMemoryStream object. - - The memory manager - - - - Allocate a new RecyclableMemoryStream object - - The memory manager - A string identifying this stream for logging and debugging purposes - - - - Allocate a new RecyclableMemoryStream object - - The memory manager - A string identifying this stream for logging and debugging purposes - The initial requested size to prevent future allocations - - - - Allocate a new RecyclableMemoryStream object - - The memory manager - A string identifying this stream for logging and debugging purposes - The initial requested size to prevent future allocations - An initial buffer to use. This buffer will be owned by the stream and returned to the memory manager upon Dispose. - - - - Returns the memory used by this stream back to the pool. 
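Putting the manager and stream together, a typical usage pattern is sketched below. The GetStream/GetBuffer names follow the summaries in this file; the payload and its consumer are invented for illustration.

```csharp
// Hedged sketch: manager is the RecyclableMemoryStreamManager configured earlier.
byte[] payload = System.Text.Encoding.UTF8.GetBytes("hello pooled streams");

using (var ms = manager.GetStream("MyService.Serialize")) // tag used for tracking/logging
{
    ms.Write(payload, 0, payload.Length);

    // GetBuffer() returns one contiguous buffer (forcing a large-pool buffer once more
    // than one block is in use). It can be longer than the logical length, so always
    // pair it with ms.Length.
    byte[] buffer = ms.GetBuffer();
    int length = (int)ms.Length;
    // ... hand (buffer, 0, length) to whatever needs a contiguous byte range ...
}   // Dispose returns the blocks or large buffer to the manager's pools
```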
- - Whether we're disposing (true), or being called by the finalizer (false) - This method is not thread safe and it may not be called more than once. - - - - Equivalent to Dispose - - - - - Gets or sets the capacity - - Capacity is always in multiples of the memory manager's block size, unless - the large buffer is in use. Capacity never decreases during a stream's lifetime. - Explicitly setting the capacity to a lower value than the current value will have no effect. - This is because the buffers are all pooled by chunks and there's little reason to - allow stream truncation. - - Object has been disposed - - - - Gets the number of bytes written to this stream. - - Object has been disposed - - - - Gets the current position in the stream - - Object has been disposed - - - - Whether the stream can currently read - - - - - Whether the stream can currently seek - - - - - Always false - - - - - Whether the stream can currently write - - - - - Returns a single buffer containing the contents of the stream. - The buffer may be longer than the stream length. - - A byte[] buffer - IMPORTANT: Doing a Write() after calling GetBuffer() invalidates the buffer. The old buffer is held onto - until Dispose is called, but the next time GetBuffer() is called, a new buffer from the pool will be required. - Object has been disposed - - - - Returns a new array with a copy of the buffer's contents. You should almost certainly be using GetBuffer combined with the Length to - access the bytes in this stream. Calling ToArray will destroy the benefits of pooled buffers, but it is included - for the sake of completeness. - - Object has been disposed - - - - Reads from the current position into the provided buffer - - Destination buffer - Offset into buffer at which to start placing the read bytes. - Number of bytes to read. - The number of bytes read - buffer is null - offset or count is less than 0 - offset subtracted from the buffer length is less than count - Object has been disposed - - - - Writes the buffer to the stream - - Source buffer - Start position - Number of bytes to write - buffer is null - offset or count is negative - buffer.Length - offset is not less than count - Object has been disposed - - - - Returns a useful string for debugging. This should not normally be called in actual production code. - - - - - Writes a single byte to the current position in the stream. - - byte value to write - Object has been disposed - - - - Reads a single byte from the current position in the stream. - - The byte at the current position, or -1 if the position is at the end of the stream. - Object has been disposed - - - - Sets the length of the stream - - value is negative or larger than MaxStreamLength - Object has been disposed - - - - Sets the position to the offset from the seek location - - How many bytes to move - From where - The new position - Object has been disposed - offset is larger than MaxStreamLength - Invalid seek origin - Attempt to set negative position - - - - Synchronously writes this stream's bytes to the parameter stream. - - Destination stream - Important: This does a synchronous write, which may not be desired in some situations - - - - Release the large buffer (either stores it for eventual release or returns it immediately). - - - - - A class to allow the conversion of doubles to string representations of - their exact decimal values. The implementation aims for readability over - efficiency. 
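As a quick illustration of the exact-decimal converter just introduced, the usual entry point in Jon Skeet's original is a static ToExactString method; that name is an assumption here, not confirmed by this file.

```csharp
// Hedged sketch: print the exact decimal expansion of the double nearest to 0.1.
// (ToExactString is the method name used in the linked original; assumed here.)
string exact = DoubleConverter.ToExactString(0.1d);
Console.WriteLine(exact);
// prints something like 0.1000000000000000055511151231257827021181583404541015625
```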
- - Courtesy of @JonSkeet - http://www.yoda.arachsys.com/csharp/DoubleConverter.cs - - - - - - - - How many digits are *after* the decimal point - - - - - Constructs an arbitrary decimal expansion from the given long. - The long must not be negative. - - - - - Multiplies the current expansion by the given amount, which should - only be 2 or 5. - - - - - Shifts the decimal point; a negative value makes - the decimal expansion bigger (as fewer digits come after the - decimal place) and a positive value makes the decimal - expansion smaller. - - - - - Removes leading/trailing zeroes from the expansion. - - - - - Converts the value to a proper decimal string representation. - - - - - Creates an instance of a Type from a string value - - - - - Determines whether the specified type is convertible from string. - - The type. - - true if the specified type is convertible from string; otherwise, false. - - - - - Parses the specified value. - - The value. - - - - - Parses the specified type. - - The type. - The value. - - - - - Useful extension method to get the Dictionary[string,string] representation of any POCO type. - - - - - - Recursively prints the contents of any POCO object in a human-friendly, readable format - - - - - - Print Dump to Console.WriteLine - - - - - Print string.Format to Console.WriteLine - - - - - Parses the specified value. - - The value. - - - - - Populate an object with Example data. - - - - - - - Populates the object with example data. - - - Tracks how deeply nested we are - - - - - Public Code API to register commercial license for ServiceStack. - - - - - Internal Utilities to verify licensing - - - - - Maps the path of a file in the context of a VS project - - the relative path - the absolute path - Assumes static content is two directories above the /bin/ directory, - eg. in a unit test scenario the assembly would be in /bin/Debug/. - - - - Maps the path of a file in a self-hosted scenario - - the relative path - the absolute path - Assumes static content is copied to /bin/ folder with the assemblies - - - - Maps the path of a file in an Asp.Net hosted scenario - - the relative path - the absolute path - Assumes static content is in the parent folder of the /bin/ directory - - - - Implement the serializer using a more static approach - - - - - - Creates a new instance of type. - First looks at JsConfig.ModelFactory before falling back to CreateInstance - - - - - Creates a new instance of type. - First looks at JsConfig.ModelFactory before falling back to CreateInstance - - - - - Creates a new instance from the default constructor of type - - - - - Add a Property attribute at runtime. - Not threadsafe, should only add attributes on Startup. - - - - - Add a Property attribute at runtime. - Not threadsafe, should only add attributes on Startup. - - - - - @jonskeet: Collection of utility methods which operate on streams. - r285, February 26th 2009: http://www.yoda.arachsys.com/csharp/miscutil/ - - - - - Reads the given stream up to the end, returning the data as a byte - array. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer size. - - - - - Reads the given stream up to the end, returning the data as a byte - array, using the given buffer for transferring data. Note that the - current contents of the buffer is ignored, so the buffer needn't - be cleared beforehand. - - - - - Copies all the data from one stream into another. 
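The stream helpers summarised above are most often used to drain a stream into a byte array or to pipe one stream into another. A minimal sketch, assuming they are exposed as ReadFully/CopyTo extension methods (the file names are made up and the extension namespace import is omitted):

```csharp
using System.IO;

// Drain a stream to a byte[] by reading until end-of-stream.
byte[] bytes;
using (var stream = File.OpenRead("request.bin"))
{
    bytes = stream.ReadFully();
}

// Copy all data from one stream into another.
using (var source = File.OpenRead("in.dat"))
using (var dest = File.Create("out.dat"))
{
    source.CopyTo(dest);
}
```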
- - - - - Copies all the data from one stream into another, using a buffer - of the given size. - - - - - Copies all the data from one stream into another, using the given - buffer for transferring data. Note that the current contents of - the buffer is ignored, so the buffer needn't be cleared beforehand. - - - - - Reads exactly the given number of bytes from the specified stream. - If the end of the stream is reached before the specified amount - of data is read, an exception is thrown. - - - - - Reads into a buffer, filling it completely. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Reads exactly the given number of bytes from the specified stream, - into the given buffer, starting at position 0 of the array. - - - - - Same as ReadExactly, but without the argument checks. - - - - - Converts from base: 0 - 62 - - The source. - From. - To. - - - - - Skip the encoding process for 'safe strings' - - - - - - diff --git a/lib/tests/ServiceStack.XML b/lib/tests/ServiceStack.XML deleted file mode 100644 index a449de12..00000000 --- a/lib/tests/ServiceStack.XML +++ /dev/null @@ -1,8871 +0,0 @@ - - - - ServiceStack - - - - - Default redirct URL if [Authenticate] attribute doesn't permit access. - - - - - Default redirct URL if Required Role or Permission attributes doesn't permit access. - - - - - The Interface Auth Repositories need to implement to support API Keys - - - - - The POCO Table used to persist API Keys - - - - - Enable access to protected Services using API Keys - - - - - Modify the registration of GetApiKeys and RegenerateApiKeys Services - - - - - How much entropy should the generated keys have. (default 24) - - - - - Generate different keys for different environments. (default live,test) - - - - - Different types of Keys each user can have. (default secret) - - - - - Whether to automatically expire keys. (default no expiry) - - - - - Automatically create the ApiKey Table for AuthRepositories which need it. (default true) - - - - - Whether to only allow access via API Key from a secure connection. (default true) - - - - - Change how API Key is generated - - - - - Run custom filter after API Key is created - - - - - Used to Issue and process JWT Tokens and registers ConvertSessionToToken Service to convert Sessions to JWT Tokens - - - - - Whether to populate the Bearer Token in the AuthenticateResponse - - - - - Enable access to protected Services using JWT Tokens - - - - - Different HMAC Algorithms supported - - - - - Different RSA Signing Algorithms supported - - - - - Whether to only allow access via API Key from a secure connection. (default true) - - - - - Run custom filter after JWT Header is created - - - - - Run custom filter after JWT Payload is created - - - - - Run custom filter after session is restored from a JWT Token - - - - - Whether to encrypt JWE Payload (default false). - Uses RSA-OAEP for Key Encryption and AES/128/CBC HMAC SHA256 for Conent Encryption - - - - - Which Hash Algorithm should be used to sign the JWT Token. (default HS256) - - - - - Whether to only allow processing of JWT Tokens using the configured HashAlgorithm. (default true) - - - - - The Issuer to embed in the token. (default ssjwt) - - - - - The Audience to embed in the token. (default null) - - - - - What Id to use to identify the Key used to sign the token. 
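A sketch of wiring the API Key and JWT providers described above into a host. Class and property names follow the summaries (ApiKeyAuthProvider, JwtAuthProvider, RequireSecureConnection, ExpireTokensIn), but the surrounding AppHost and the exact registration shape are assumptions.

```csharp
// Hedged sketch: inside an AppHost.Configure() override.
Plugins.Add(new AuthFeature(() => new AuthUserSession(),
    new IAuthProvider[]
    {
        new ApiKeyAuthProvider(AppSettings)
        {
            RequireSecureConnection = true,  // only accept API Keys over HTTPS (the default)
        },
        new JwtAuthProvider(AppSettings)
        {
            // AuthKeyBase64 is an assumed convenience setter for the signing AuthKey.
            AuthKeyBase64 = AppSettings.GetString("jwt.AuthKeyBase64"),
            ExpireTokensIn = TimeSpan.FromDays(14), // default shown in the summary above
        },
    }));
```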
(default First 3 chars of Base64 Key) - - - - - The AuthKey used to sign the JWT Token - - - - - Allow verification using multiple Auth keys - - - - - The RSA Private Key used to Sign the JWT Token when RSA is used - - - - - Convenient overload to intialize the Private Key via exported XML - - - - - The RSA Public Key used to Verify the JWT Token when RSA is used - - - - - Convenient overload to intialize the Public Key via exported XML - - - - - Allow verification using multiple public keys - - - - - How long should JWT Tokens be valid for. (default 14 days) - - - - - Convenient overload to initialize ExpireTokensIn with an Integer - - - - - Whether to invalidate all JWT Tokens issued before a specified date. - - - - - Modify the registration of ConvertSessionToToken Service - - - - - Specify all roles to be used by this application - - - - - Only allow access to users in specified roles - - - - - Conveneint base class with empty virtual methods so subclasses only need to override the hooks they need. - - - - - Create Odnoklassniki App at: http://www.odnoklassniki.ru/devaccess - The Callback URL for your app should match the CallbackUrl provided. - - NB: They claim they use OAuth 2.0, but they in fact don't. - http://apiok.ru/wiki/display/api/Authorization+OAuth+2.0 - - - - - Create VK App at: http://vk.com/editapp?act=create - The Callback URL for your app should match the CallbackUrl provided. - - - - - If previous attemts failes, the subsequential calls - build up code value like "code1,code2,code3" - so we need the last one only - - - - - - - Create Yandex App at: https://oauth.yandex.ru/client/new - The Callback URL for your app should match the CallbackUrl provided. - - - - - Create an App at: https://github.com/settings/applications/new - The Callback URL for your app should match the CallbackUrl provided. - - - - - Calling to Github API without defined Useragent throws - exception "The server committed a protocol violation. Section=ResponseStatusLine" - - - - - Creates the required missing tables or DB schema - - - - - Inject logic into existing services by introspecting the request and injecting your own - validation logic. Exceptions thrown will have the same behaviour as if the service threw it. - - If a non-null object is returned the request will short-circuit and return that response. - - The instance of the service - GET,POST,PUT,DELETE - - Response DTO; non-null will short-circuit execution and return that response - - - - Public API entry point to authenticate via code - - - null; if already autenticated otherwise a populated instance of AuthResponse - - - - The specified may change as a side-effect of this method. If - subsequent code relies on current data be sure to reload - the session istance via . - - - - - Allows specifying a global fallback config that if exists is formatted with the Provider as the first arg. - E.g. this appSetting with the TwitterAuthProvider: - oauth.CallbackUrl="http://localhost:11001/auth/{0}" - Would result in: - oauth.CallbackUrl="http://localhost:11001/auth/twitter" - - - - - - Remove the Users Session - - - - - - - - Saves the Auth Tokens for this request. Called in OnAuthenticated(). - Overrideable, the default behaviour is to call IUserAuthRepository.CreateOrMergeAuthSession(). - - - - - Create a Facebook App at: https://developers.facebook.com/apps - The Callback URL for your app should match the CallbackUrl provided. - - - - - Download Yammer User Info given its ID. - - - The Yammer User ID. 
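The role restriction summarised above is normally applied declaratively on a service. A small sketch; the DTOs and role name are invented for illustration.

```csharp
public class GetServerStats { }
public class GetServerStatsResponse { }

[RequiredRole("Admin")]   // only authenticated users in the Admin role may call this service
public class ServerStatsService : Service
{
    public object Any(GetServerStats request) => new GetServerStatsResponse();
}
```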
- - - The User info in JSON format. - - - - Yammer provides a method to retrieve current user information via - "https://www.yammer.com/api/v1/users/current.json". - - - However, to ensure consistency with the rest of the Auth codebase, - the explicit URL will be used, where [:id] denotes the User ID: - "https://www.yammer.com/api/v1/users/[:id].json" - - - Refer to: https://developer.yammer.com/restapi/ for full documentation. - - - - - - Remove the Users Session - - - - - - - - The entry point for all AuthProvider providers. Runs inside the AuthService so exceptions are treated normally. - Overridable so you can provide your own Auth implementation. - - - - - Determine if the current session is already authenticated with this AuthProvider - - - - - Thread-safe In memory UserAuth data store so it can be used without a dependency on Redis. - - - - - The entry point for all AuthProvider providers. Runs inside the AuthService so exceptions are treated normally. - Overridable so you can provide your own Auth implementation. - - - - - - - - - Sets the CallbackUrl and session.ReferrerUrl if not set and initializes the session tokens for this AuthProvider - - - - - - - - - Update an existing registraiton - - - - - Create new Registration - - - - - Logic to update UserAuth from Registration info, not enabled on PUT because of security. - - - - - Thank you Martijn - http://www.dijksterhuis.org/creating-salted-hash-values-in-c/ - - Stronger/Slower Alternative: - https://github.com/defuse/password-hashing/blob/master/PasswordStorage.cs - - - - - Create an app at https://dev.twitter.com/apps to get your ConsumerKey and ConsumerSecret for your app. - The Callback URL for your app should match the CallbackUrl provided. - - - - - The ServiceStack Yammer OAuth provider. - - - - This provider is loosely based on the existing ServiceStack's Facebook OAuth provider. - - - For the full info on Yammer's OAuth2 authentication flow, refer to: - https://developer.yammer.com/authentication/#a-oauth2 - - - Note: Add these to your application / web config settings under appSettings and replace - values as appropriate. - - - - - - - - - ]]> - - - - - - The OAuth provider name / identifier. - - - - - Initializes a new instance of the class. - - - The application settings (in web.config). - - - - - Gets or sets the Yammer OAuth client id. - - - - - Gets or sets the Yammer OAuth client secret. - - - - - Gets or sets the Yammer OAuth pre-auth url. - - - - - Authenticate against Yammer OAuth endpoint. - - - The auth service. - - - The session. - - - The request. - - - The . - - - - - Load the UserAuth info into the session. - - - The User session. - - - The OAuth tokens. - - - The auth info. - - - - - Load the UserOAuth info into the session. - - - The auth session. - - - The OAuth tokens. - - - - - The Yammer User's email addresses. - - - - - Gets or sets the email address type (e.g. primary). - - - - - Gets or sets the email address. 
- - - - - The BaseUrl of the ServiceStack instance (inferred) - - - - - Name of the ServiceStack Instance (inferred) - - - - - Textual description of the AutoQuery Services (shown in Home Services list) - - - - - Icon for this ServiceStack Instance (shown in Home Services list) - - - - - The different Content Type formats to display - - - - - The configured MaxLimit for AutoQuery - - - - - Whether to publish this Service to the public Services registry - - - - - Only show AutoQuery Services attributed with [AutoQueryViewer] - - - - - List of different Search Filters available - - - - - The Column which should be selected by default - - - - - The Query Type filter which should be selected by default - - - - - The search text which should be populated by default - - - - - Link to your website users can click to find out more about you - - - - - A custom logo or image that users can click on to visit your site - - - - - The default color of text - - - - - The default color of links - - - - - The default background color of each screen - - - - - The default background image of each screen anchored to the bottom left - - - - - The default icon for each of your AutoQuery Services - - - - - Returns true if the User Is Authenticated - - - - - How many queries are available to this user - - - - - The CacheKey to be use store the response against - - - - - The base Cache Key used to cache the Service response - - - - - Additional CacheKey Modifiers used to cache different outputs for a single Service Response - - - - - How long to cache the resource for. Fallsback to HttpCacheFeature.DefaultExpiresIn - - - - - The unique ETag returned for this resource clients can use to determine whether their local version has changed - - - - - The Age for this resource returned to clients - - - - - The MaxAge returned to clients to indicate how long they can use their local cache before re-validating - - - - - The LastModified date to use for the Cache and HTTP Header - - - - - Cache-Control HTTP Headers - - - - - Create unique cache per user - - - - - Use HostContext.LocalCache or HostContext.Cache - - - - - Skip compression for this Cache Result - - - - - Cache the Response of a Service - - - - - Cache expiry in seconds - - - - - MaxAge in seconds - - - - - Cache-Control HTTP Headers - - - - - Vary cache per user - - - - - Vary cache for users in these roles - - - - - Use HostContext.LocalCache or HostContext.Cache - - - - - Skip compression for this Cache Result - - - - - Creates a new property rule from a lambda expression. - - - - - Default validator selector that will execute all rules that do not belong to a RuleSet. - - - - - Determines whether or not a rule should execute. - - The rule - Property path (eg Customer.Address.Line1) - Contextual information - Whether or not the validator can execute. - - - - Custom IValidationRule for performing custom logic. - - - - - - Rule set to which this rule belongs. - - - - - Creates a new DelegateValidator using the specified function to perform validation. - - - - - Creates a new DelegateValidator using the specified function to perform validation. - - - - - Performs validation using a validation context and returns a collection of Validation Failures. - - Validation Context - A collection of validation failures - - - - The validators that are grouped under this rule. - - - - - Performs validation using a validation context and returns a collection of Validation Failures. 
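The caching options listed above (response cache duration, client MaxAge, per-user variation and so on) are applied per service implementation. A hedged sketch; the request DTO and payload are invented.

```csharp
public class GetProducts { }

public class ProductServices : Service
{
    // Cache the server response for 60s, let clients reuse their copy for 30s,
    // and keep a separate cache entry per authenticated user.
    [CacheResponse(Duration = 60, MaxAge = 30, VaryByUser = true)]
    public object Any(GetProducts request)
    {
        return new { GeneratedAt = DateTime.UtcNow };
    }
}
```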
- - Validation Context - A collection of validation failures - - - - Useful extensions - - - - - Gets a MemberInfo from a member expression. - - - - - Gets a MemberInfo from a member expression. - - - - - Splits pascal case, so "FooBar" would become "Foo Bar" - - - - - Helper method to construct a constant expression from a constant. - - Type of object being validated - Type of property being validated - The value being compared - - - - - Based on a child validator and a propery rule, infers whether the validator should be wrapped in a ChildValidatorAdaptor or a CollectionValidatorAdaptor - - - - - Represents an object that is configurable. - - Type of object being configured - Return type - - - - Configures the current object. - - Action to configure the object. - - - - - Instancace cache. - TODO: This isn't actually completely thread safe. It would be much better to use ConcurrentDictionary, but this isn't available in Silverlight/WP7. - - - - - Gets or creates an instance using Activator.CreateInstance - - The type to instantiate - The instantiated object - - - - Gets or creates an instance using a custom factory - - The type to instantiate - The custom factory - The instantiated object - - - - Determines whether or not a rule should execute. - - - - - Determines whether or not a rule should execute. - - The rule - Property path (eg Customer.Address.Line1) - Contextual information - Whether or not the validator can execute. - - - - Selects validators that are associated with a particular property. - - - - - Creates a new instance of MemberNameValidatorSelector. - - - - - Determines whether or not a rule should execute. - - The rule - Property path (eg Customer.Address.Line1) - Contextual information - Whether or not the validator can execute. - - - - Creates a MemberNameValidatorSelector from a collection of expressions. - - - - - Assists in the construction of validation messages. - - - - - Default Property Name placeholder. - - - - - Adds a value for a validation message placeholder. - - - - - - - - Appends a property name to the message. - - The name of the property - - - - - Adds additional arguments to the message for use with standard string placeholders. - - Additional arguments - - - - - Constructs the final message from the specified template. - - Message template - The message with placeholders replaced with their appropriate values - - - - Represents a chain of properties - - - - - Creates a new PropertyChain. - - - - - Creates a new PropertyChain based on another. - - - - - Adds a MemberInfo instance to the chain - - Member to add - - - - Adds a property name to the chain - - Name of the property to add - - - - Adds an indexer to the property chain. For example, if the following chain has been constructed: - Parent.Child - then calling AddIndexer(0) would convert this to: - Parent.Child[0] - - - - - - Creates a string representation of a property chain. - - - - - Checks if the current chain is the child of another chain. - For example, if chain1 were for "Parent.Child" and chain2 were for "Parent.Child.GrandChild" then - chain2.IsChildChainOf(chain1) would be true. - - The parent chain to compare - True if the current chain is the child of the other chain, otherwise false - - - - Builds a property path. - - - - - Defines a rule associated with a property. - - - - - Property associated with this rule. - - - - - Function that can be invoked to retrieve the value of the property. - - - - - Expression that was used to create the rule. 
- - - - - String source that can be used to retrieve the display name (if null, falls back to the property name) - - - - - Rule set that this rule belongs to (if specified) - - - - - Function that will be invoked if any of the validators associated with this rule fail. - - - - - The current validator being configured by this rule. - - - - - Type of the property being validated - - - - - Cascade mode for this rule. - - - - - Validators associated with this rule. - - - - - Creates a new property rule. - - Property - Function to get the property value - Lambda expression used to create the rule - Function to get the cascade mode. - Type to validate - Container type that owns the property - - - - Creates a new property rule from a lambda expression. - - - - - Creates a new property rule from a lambda expression. - - - - - Adds a validator to the rule. - - - - - Replaces a validator in this rule. Used to wrap validators. - - - - - Returns the property name for the property being validated. - Returns null if it is not a property being validated (eg a method call) - - - - - Display name for the property. - - - - - Performs validation using a validation context and returns a collection of Validation Failures. - - Validation Context - A collection of validation failures - - - - Invokes a property validator using the specified validation context. - - - - - Builds a validation rule and constructs a validator. - - Type of object being validated - Type of property being validated - - - - The rule being created by this RuleBuilder. - - - - - Creates a new instance of the RuleBuilder class. - - - - - Sets the validator associated with the rule. - - The validator to set - - - - - Sets the validator associated with the rule. Use with complex properties where an IValidator instance is already declared for the property type. - - The validator to set - - - - Selects validators that belong to the specified rulesets. - - - - - Creates a new instance of the RulesetValidatorSelector. - - - - - Determines whether or not a rule should execute. - - The rule - Property path (eg Customer.Address.Line1) - Contextual information - Whether or not the validator can execute. - - - - Base class for entity validator classes. - - The type of the object being validated - - - - Sets the cascade mode for all rules within this validator. - - - - - Validates the specified instance - - The object to validate - A ValidationResult object containing any validation failures - - - - Validates the specified instance. - - Validation Context - A ValidationResult object containing any validation failures. - - - - Adds a rule to the current validator. - - - - - - Creates a that can be used to obtain metadata about the current validator. - - - - - Defines a validation rule for a specify property. - - - RuleFor(x => x.Surname)... - - The type of property being validated - The expression representing the property to validate - an IRuleBuilder instance on which validators can be defined - - - - Defines a custom validation rule using a lambda expression. - If the validation rule fails, it should return a instance of a ValidationFailure - If the validation rule succeeds, it should return null. - - A lambda that executes custom validation rules. - - - - Defines a custom validation rule using a lambda expression. - If the validation rule fails, it should return an instance of ValidationFailure - If the validation rule succeeds, it should return null. 
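The rule-definition surface described above (RuleFor, HTTP-verb rule sets, conditional clauses) is normally used from a validator subclass. A minimal sketch, assuming the ServiceStack flavour where verb-specific rule sets take an ApplyTo flag:

```csharp
public class Customer
{
    public int Id { get; set; }
    public string Surname { get; set; }
}

public class CustomerValidator : AbstractValidator<Customer>
{
    public CustomerValidator()
    {
        RuleFor(x => x.Surname).NotEmpty();

        // Rules that only run for POST requests.
        RuleSet(ApplyTo.Post, () =>
        {
            RuleFor(x => x.Id).GreaterThan(0);
        });

        // Conditional rule: only evaluated when the predicate returns true.
        RuleFor(x => x.Surname).Length(2, 50).When(x => x.Surname != null);
    }
}
```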
- - A lambda that executes custom validation rules - - - - Defines a RuleSet that can be used to group together several validators. - - The name of the ruleset. - Action that encapsulates the rules in the ruleset. - - - - Defines a RuleSet that can be used to provide specific validation rules for specific HTTP methods (GET, POST...) - - The HTTP methods where this rule set should be used. - Action that encapuslates the rules in the ruleset. - - - - Defines a condition that applies to several rules - - The condition that should apply to multiple rules - Action that encapsulates the rules. - - - - - Defiles an inverse condition that applies to several rules - - The condition that should be applied to multiple rules - Action that encapsulates the rules - - - - Returns an enumerator that iterates through the collection of validation rules. - - - A that can be used to iterate through the collection. - - 1 - - - - Class that can be used to find all the validators from a collection of types. - - - - - Creates a scanner that works on a sequence of types. - - - - - Finds all the validators in the specified assembly. - - - - - Finds all the validators in the assembly containing the specified type. - - - - - Performs the specified action to all of the assembly scan results. - - - - - Returns an enumerator that iterates through the collection. - - - A that can be used to iterate through the collection. - - 1 - - - - Result of performing a scan. - - - - - Creates an instance of an AssemblyScanResult. - - - - - Validator interface type, eg IValidator<Foo> - - - - - Concrete type that implements the InterfaceType, eg FooValidator. - - - - - Implementation of IValidatorFactory that looks for ValidatorAttribute instances on the specified type in order to provide the validator instance. - - - - - Gets a validator for the appropriate type. - - - - - Gets a validator for the appropriate type. - - - - - Validator attribute to define the class that will describe the Validation rules - - - - - The type of the validator used to validate the current type. - - - - - Creates an instance of the ValidatorAttribute allowing a validator type to be specified. - - - - - Associates an instance of IValidator with the current property rule and is used to validate each item within the collection. - - The validator to use - - - - Extension methods that provide the default set of validators. - - - - - Defines a 'not null' validator on the current rule builder. - Validation will fail if the property is null. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - - - - - Defines a 'not empty' validator on the current rule builder. - Validation will fail if the property is null, an empty or the default value for the type (for example, 0 for integers) - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - - - - - Defines a length validator on the current rule builder, but only for string properties. - Validation will fail if the length of the string is outside of the specifed range. The range is inclusive. - - Type of object being validated - The rule builder on which the validator should be defined - - - - - Defines a length validator on the current rule builder, but only for string properties. - Validation will fail if the length of the string is not equal to the length specified. 
- - Type of object being validated - The rule builder on which the validator should be defined - - - - - Defines a regular expression validator on the current rule builder, but only for string properties. - Validation will fail if the value returned by the lambda does not match the regular expression. - - Type of object being validated - The rule builder on which the validator should be defined - The regular expression to check the value against. - - - - - Defines a regular expression validator on the current rule builder, but only for string properties. - Validation will fail if the value returned by the lambda is not a valid email address. - - Type of object being validated - The rule builder on which the validator should be defined - - - - - Defines a 'not equal' validator on the current rule builder. - Validation will fail if the specified value is equal to the value of the property. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value to compare - Equality comparer to use - - - - - Defines a 'not equal' validator on the current rule builder using a lambda to specify the value. - Validation will fail if the value returned by the lambda is equal to the value of the property. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - A lambda expression to provide the comparison value - Equality Comparer to use - - - - - Defines an 'equals' validator on the current rule builder. - Validation will fail if the specified value is not equal to the value of the property. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value to compare - Equality Comparer to use - - - - - Defines an 'equals' validator on the current rule builder using a lambda to specify the comparison value. - Validation will fail if the value returned by the lambda is not equal to the value of the property. - - The type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - A lambda expression to provide the comparison value - Equality comparer to use - - - - - Defines a predicate validator on the current rule builder using a lambda expression to specify the predicate. - Validation will fail if the specified lambda returns false. - Validation will succeed if the specifed lambda returns true. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - A lambda expression specifying the predicate - - - - - Defines a predicate validator on the current rule builder using a lambda expression to specify the predicate. - Validation will fail if the specified lambda returns false. - Validation will succeed if the specifed lambda returns true. - This overload accepts the object being validated in addition to the property being validated. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - A lambda expression specifying the predicate - - - - - Defines a predicate validator on the current rule builder using a lambda expression to specify the predicate. - Validation will fail if the specified lambda returns false. - Validation will succeed if the specifed lambda returns true. - This overload accepts the object being validated in addition to the property being validated. 
- - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - A lambda expression specifying the predicate - - - - - Defines a 'less than' validator on the current rule builder. - The validation will succeed if the property value is less than the specified value. - The validation will fail if the property value is greater than or equal to the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'less than' validator on the current rule builder. - The validation will succeed if the property value is less than the specified value. - The validation will fail if the property value is greater than or equal to the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'less than or equal' validator on the current rule builder. - The validation will succeed if the property value is less than or equal to the specified value. - The validation will fail if the property value is greater than the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'less than or equal' validator on the current rule builder. - The validation will succeed if the property value is less than or equal to the specified value. - The validation will fail if the property value is greater than the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'greater than' validator on the current rule builder. - The validation will succeed if the property value is greater than the specified value. - The validation will fail if the property value is less than or equal to the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'greater than' validator on the current rule builder. - The validation will succeed if the property value is greater than the specified value. - The validation will fail if the property value is less than or equal to the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'greater than or equal' validator on the current rule builder. - The validation will succeed if the property value is greater than or equal the specified value. - The validation will fail if the property value is less than the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'greater than or equal' validator on the current rule builder. - The validation will succeed if the property value is greater than or equal the specified value. - The validation will fail if the property value is less than the specified value. 
- - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'less than' validator on the current rule builder using a lambda expression. - The validation will succeed if the property value is less than the specified value. - The validation will fail if the property value is greater than or equal to the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - A lambda that should return the value being compared - - - - - Defines a 'less than' validator on the current rule builder using a lambda expression. - The validation will succeed if the property value is less than the specified value. - The validation will fail if the property value is greater than or equal to the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - A lambda that should return the value being compared - - - - - Defines a 'less than or equal' validator on the current rule builder using a lambda expression. - The validation will succeed if the property value is less than or equal to the specified value. - The validation will fail if the property value is greater than the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'less than or equal' validator on the current rule builder using a lambda expression. - The validation will succeed if the property value is less than or equal to the specified value. - The validation will fail if the property value is greater than the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'less than' validator on the current rule builder using a lambda expression. - The validation will succeed if the property value is greater than the specified value. - The validation will fail if the property value is less than or equal to the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'less than' validator on the current rule builder using a lambda expression. - The validation will succeed if the property value is greater than the specified value. - The validation will fail if the property value is less than or equal to the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'less than' validator on the current rule builder using a lambda expression. - The validation will succeed if the property value is greater than or equal the specified value. - The validation will fail if the property value is less than the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Defines a 'less than' validator on the current rule builder using a lambda expression. - The validation will succeed if the property value is greater than or equal the specified value. 
- The validation will fail if the property value is less than the specified value. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The value being compared - - - - - Validates certain properties of the specified instance. - - The current validator - The object to validate - Expressions to specify the properties to validate - A ValidationResult object containing any validation failures - - - - Validates certain properties of the specified instance. - - The object to validate - The names of the properties to validate. - A ValidationResult object containing any validation failures. - - - - Performs validation and then throws an exception if validation fails. - - - - - Defines an 'inclusive between' validator on the current rule builder, but only for properties of types that implement IComparable. - Validation will fail if the value of the property is outside of the specifed range. The range is inclusive. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The lowest allowed value - The highest allowed value - - - - - Defines an 'inclusive between' validator on the current rule builder, but only for properties of types that implement IComparable. - Validation will fail if the value of the property is outside of the specifed range. The range is inclusive. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The lowest allowed value - The highest allowed value - - - - - Defines an 'exclusive between' validator on the current rule builder, but only for properties of types that implement IComparable. - Validation will fail if the value of the property is outside of the specifed range. The range is exclusive. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The lowest allowed value - The highest allowed value - - - - - Defines an 'exclusive between' validator on the current rule builder, but only for properties of types that implement IComparable. - Validation will fail if the value of the property is outside of the specifed range. The range is exclusive. - - Type of object being validated - Type of property being validated - The rule builder on which the validator should be defined - The lowest allowed value - The highest allowed value - - - - - Defines a credit card validator for the current rule builder that ensures that the specified string is a valid credit card number. - - - - - Default options that can be used to configure a validator. - - - - - Specifies the cascade mode for failures. - If set to 'Stop' then execution of the rule will stop once the first validator in the chain fails. - If set to 'Continue' then all validators in the chain will execute regardless of failures. - - - - - Specifies a custom action to be invoked when the validator fails. - - - - - - - - - - Specifies a custom error message to use if validation fails. - - The current rule - The error message to use - - - - - Specifies a custom error message to use if validation fails. - - The current rule - The error message to use - Additional arguments to be specified when formatting the custom error message. - - - - - Specifies a custom error message to use if validation fails. 
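Combining a few of the built-in validators listed above with ValidateAndThrow, as a short sketch (the Order type is invented for illustration):

```csharp
public class Order
{
    public decimal Total { get; set; }
    public string CardNumber { get; set; }
}

public class OrderValidator : AbstractValidator<Order>
{
    public OrderValidator()
    {
        RuleFor(x => x.Total)
            .InclusiveBetween(1m, 10000m)
            .WithMessage("Total must be between 1 and 10,000.");

        RuleFor(x => x.CardNumber).CreditCard();
    }
}

// Throws a validation exception listing the failures when the order is invalid.
new OrderValidator().ValidateAndThrow(new Order { Total = 0, CardNumber = "4111111111111111" });
```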
- - The current rule - The error message to use - Additional property values to be included when formatting the custom error message. - - - - - Specifies a custom error message resource to use when validation fails. - - The current rule - The resource to use as an expression, eg () => Messages.MyResource - - - - - Specifies a custom error message resource to use when validation fails. - - The current rule - The resource to use as an expression, eg () => Messages.MyResource - Custom message format args - - - - - Specifies a custom error message resource to use when validation fails. - - The current rule - The resource to use as an expression, eg () => Messages.MyResource - Custom message format args - - - - - Specifies a custom error message resource to use when validation fails. - - The current rule - The resource to use as an expression, eg () => Messages.MyResource - The resource accessor builder to use. - - - - - Specifies a custom error code to use when validation fails - - The current rule - The error code to use - - - - - Specifies a condition limiting when the validator should run. - The validator will only be executed if the result of the lambda returns true. - - The current rule - A lambda expression that specifies a condition for when the validator should run - Whether the condition should be applied to the current rule or all rules in the chain - - - - - Specifies a condition limiting when the validator should not run. - The validator will only be executed if the result of the lambda returns false. - - The current rule - A lambda expression that specifies a condition for when the validator should not run - Whether the condition should be applied to the current rule or all rules in the chain - - - - - Specifies a custom property name to use within the error message. - - The current rule - The property name to use - - - - - Specifies a localized name for the error message. - - The current rule - The resource to use as an expression, eg () => Messages.MyResource - Resource accessor builder to use - - - - Overrides the name of the property associated with this rule. - NOTE: This is a considered to be an advanced feature. 99% of the time that you use this, you actually meant to use WithName. - - The current rule - The property name to use - - - - - Specifies custom state that should be stored alongside the validation message when validation fails for this rule. - - - - - - - - - - Specifies how rules should cascade when one fails. - - - - - When a rule fails, execution continues to the next rule. - - - - - When a rule fails, validation is stopped and all other rules in the chain will not be executed. - - - - - Specifies where a When/Unless condition should be applied - - - - - Applies the condition to all validators declared so far in the chain. - - - - - Applies the condition to the current validator only. - - - - - Validator implementation that allows rules to be defined without inheriting from AbstractValidator. - - - - public class Customer { - public int Id { get; set; } - public string Name { get; set; } - - public static readonly InlineValidator<Customer> Validator = new InlineValidator<Customer> { - v => v.RuleFor(x => x.Name).NotNull(), - v => v.RuleFor(x => x.Id).NotEqual(0), - } - } - - - - - - - Delegate that specifies configuring an InlineValidator. - - - - - Allows configuration of the validator. - - - - - Defines a rule associated with a property which can have multiple validators. - - - - - The validators that are grouped under this rule. 
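The per-rule options above chain off the rule builder inside a validator's constructor. A brief sketch; IsAnonymous is an invented property used only to show the condition.

```csharp
RuleFor(x => x.Surname)
    .NotEmpty()
    .WithName("Last name")               // shown in the error message instead of "Surname"
    .WithErrorCode("LAST_NAME_REQUIRED") // custom error code on the resulting failure
    .When(x => !x.IsAnonymous);          // only run this rule for non-anonymous customers
```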
- - - - - Name of the rule-set to which this rule belongs. - - - - - Performs validation using a validation context and returns a collection of Validation Failures. - - Validation Context - A collection of validation failures - - - - Defines a validator for a particualr type. - - - - - - Validates the specified instance. - - The instance to validate - A ValidationResult object containing any validation failures. - - - - Sets the cascade mode for all rules within this validator. - - - - - Defines a validator for a particular type. - - - - - Validates the specified instance - - - A ValidationResult containing any validation failures - - - - Validates the specified instance. - - A ValidationContext - A ValidationResult object containy any validation failures. - - - - Creates a hook to access various meta data properties - - A IValidatorDescriptor object which contains methods to access metadata - - - - Checks to see whether the validator can validate objects of the specified type - - - - - Provides metadata about a validator. - - - - - Gets the name display name for a property. - - - - - Gets a collection of validators grouped by property. - - - - - Gets validators for a particular property. - - - - - Gets rules for a property. - - - - - Gets validators for a particular type. - - - - - Gets the validator for the specified type. - - - - - Gets the validator for the specified type. - - - - - Builds a delegate for retrieving a localised resource from a resource type and property name. - - - - - Gets a function that can be used to retrieve a message from a resource type and resource name. - - - - - Builds a delegate for retrieving a localised resource from a resource type and property name. - - - - - Builds a function used to retrieve the resource. - - - - - Gets the PropertyInfo for a resource. - ResourceType and ResourceName are ref parameters to allow derived types - to replace the type/name of the resource before the delegate is constructed. - - - - - Implemenetation of IResourceAccessorBuilder that can fall back to the default resource provider. - - - - - Gets the PropertyInfo for a resource. - ResourceType and ResourceName are ref parameters to allow derived types - to replace the type/name of the resource before the delegate is constructed. - - - - - Provides error message templates - - - - - Construct the error message template - - Error message template - - - - The name of the resource if localized. - - - - - The type of the resource provider if localized. - - - - - Represents a localized string. - - - - - Creates a new instance of the LocalizedErrorMessageSource class using the specified resource name and resource type. - - The resource type - The resource name - Strategy used to construct the resource accessor - - - - Creates an IErrorMessageSource from an expression: () => MyResources.SomeResourceName - - The expression - Strategy used to construct the resource accessor - Error message source - - - - Construct the error message template - - Error message template - - - - The name of the resource if localized. - - - - - The type of the resource provider if localized. - - - - - A strongly-typed resource class, for looking up localized strings, etc. - - - - - Returns the cached ResourceManager instance used by this class. - - - - - Overrides the current thread's CurrentUICulture property for all - resource lookups using this strongly typed resource class. - - - - - Looks up a localized string similar to '{PropertyName}' is not a valid credit card number.. 
- - - - - Looks up a localized string similar to '{PropertyName}' is not a valid email address.. - - - - - Looks up a localized string similar to '{PropertyName}' should be equal to '{PropertyValue}'.. - - - - - Looks up a localized string similar to '{PropertyName}' must be {MaxLength} characters in length. You entered {TotalLength} characters.. - - - - - Looks up a localized string similar to '{PropertyName}' must be between {From} and {To} (exclusive). You entered {Value}.. - - - - - Looks up a localized string similar to '{PropertyName}' must be greater than '{ComparisonValue}'.. - - - - - Looks up a localized string similar to '{PropertyName}' must be greater than or equal to '{ComparisonValue}'.. - - - - - Looks up a localized string similar to '{PropertyName}' must be between {From} and {To}. You entered {Value}.. - - - - - Looks up a localized string similar to '{PropertyName}' must be between {MinLength} and {MaxLength} characters. You entered {TotalLength} characters.. - - - - - Looks up a localized string similar to '{PropertyName}' must be less than '{ComparisonValue}'.. - - - - - Looks up a localized string similar to '{PropertyName}' must be less than or equal to '{ComparisonValue}'.. - - - - - Looks up a localized string similar to '{PropertyName}' should not be empty.. - - - - - Looks up a localized string similar to '{PropertyName}' should not be equal to '{PropertyValue}'.. - - - - - Looks up a localized string similar to '{PropertyName}' must not be empty.. - - - - - Looks up a localized string similar to The specified condition was not met for '{PropertyName}'.. - - - - - Looks up a localized string similar to '{PropertyName}' is not in the correct format.. - - - - - Represents a static string. - - - - - Creates a new StringErrorMessageSource using the specified error message as the error template. - - The error message template. - - - - Construct the error message template - - Error message template - - - - The name of the resource if localized. - - - - - The type of the resource provider if localized. - - - - - Creates a new validation failure. - - - - - Creates a new ValidationFailure. - - - - - The name of the property. - - - - - The error message - - - - - The error code - - - - - The property value that caused the failure. - - - - - Custom state associated with the failure. - - - - - Placeholder values used for string substitution when building ErrorMessage - - - - - Creates a textual representation of the failure. - - - - - Rule builder that starts the chain - - - - - - - Rule builder - - - - - - - Associates a validator with this the property for this rule builder. - - The validator to set - - - - - Associates an instance of IValidator with the current property rule. - - The validator to use - - - - Rule builder - - - - - - - Used for providing metadata about a validator. - - - - - Ensures that the property value is a valid credit card number. - - - - - A custom property validator. - This interface should not be implemented directly in your code as it is subject to change. - Please inherit from PropertyValidator instead. - - - - - Creates an error validation result for this validator. - - The validator context - Returns an error validation result. - - - - Non ASP.NET requests - - - - - - - - ASP.NET requests - - - - - - Non ASP.NET requests - - - - - ASP.NET requests - - - - - Keep default file contents in-memory - - - - - - Creates instance using straight Resolve approach. 
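The cookie helpers summarised above are typically called against the current response from inside a service. A hedged sketch, assuming they are exposed as SetPermanentCookie/SetSessionCookie/DeleteCookie-style helpers (names inferred from the summaries, not verified) and with invented DTOs:

```csharp
public class RememberMe { }
public class RememberMeResponse { }

public class PreferenceServices : Service
{
    public object Any(RememberMe request)
    {
        Response.SetPermanentCookie("theme", "dark");   // persistent cookie, effectively never expires
        Response.SetSessionCookie("last-visit",
            DateTime.UtcNow.ToString("o"));             // gone when the browser session ends
        Response.DeleteCookie("old-tracking-id");       // emptied and expired
        return new RememberMeResponse();
    }
}
```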
- This will throw an exception if resolution fails - - - - - Creates instance using the TryResolve approach if tryResolve = true. - Otherwise uses Resolve approach, which will throw an exception if resolution fails - - - - - Sets a persistent cookie which never expires - - - - - Sets a session cookie which expires after the browser session closes - - - - - Deletes a specified cookie by setting its value to empty and expiration to -1 days - - - - - Context to capture IService action - - - - - The number of segments separated by '/' determinable by path.Split('/').Length - e.g. /path/to/here.ext == 3 - - - - - The total number of segments after subparts have been exploded ('.') - e.g. /path/to/here.ext == 4 - - - - - Provide for quick lookups based on hashes that can be determined from a request url - - - - - For performance withPathInfoParts should already be a lower case string - to minimize redundant matching operations. - - - - - - - - For performance withPathInfoParts should already be a lower case string - to minimize redundant matching operations. - - - - - - - - - Execute MQ - - - - - Execute MQ with requestContext - - - - - Execute using empty RequestContext - - - - - Gets the name of the base most type in the heirachy tree with the same. - - We get an exception when trying to create a schema with multiple types of the same name - like when inheriting from a DataContract with the same name. - - The type. - - - - - Wrapper class for the HTTPListener to allow easier access to the - server, for start and stop management and event routing of the actual - inbound requests. - - - - - Starts the Web Service - - - A Uri that acts as the base that the server is listening on. - Format should be: http://127.0.0.1:8080/ or http://127.0.0.1:8080/somevirtual/ - Note: the trailing slash is required! For more info see the - HttpListener.Prefixes property on MSDN. - - - - - Shut down the Web Service - - - - - Overridable method that can be used to implement a custom hnandler - - - - - - Reserves the specified URL for non-administrator users and accounts. - http://msdn.microsoft.com/en-us/library/windows/desktop/cc307223(v=vs.85).aspx - - Reserved Url if the process completes successfully - - - - Used in Unit tests - - - - - - This is a micro-cache; suitable when the number of terms is controllable (a few hundred, for example), - and strictly append-only; you cannot change existing values. All key matches are on **REFERENCE** - equality. The type is fully thread-safe. - - - - - If the underlying command supports BindByName, this sets/clears the underlying - implementation accordingly. This is required to support OracleCommand from dapper-dot-net - - - - - Wraps a database connection, allowing sql execution timings to be collected when a session is started. - - - - - Returns a new that wraps , - providing query execution profiling. If profiler is null, no profiling will occur. - - Your provider-specific flavor of connection, e.g. SqlConnection, OracleConnection - The currently started or null. - Determines whether the ProfiledDbConnection will dispose the underlying connection. - - - - This will be made private; use - - - - - This will be made private; use - - - - - Wrapper for a db provider factory to enable profiling - - - - - Every provider factory must have an Instance public field - - - - - Used for db provider apis internally - - - - - proxy - - - - - proxy - - - - - BaseProfilerProvider. This providers some helper methods which provide access to - internals not otherwise available. 
- To use, override the , and - methods. - - - - - Starts a new MiniProfiler and sets it to be current. By the end of this method - should return the new MiniProfiler. - - - - - Stops the current MiniProfiler (if any is currently running). - should be called if is false - - If true, any current results will be thrown away and nothing saved - - - - Returns the current MiniProfiler. This is used by . - - - - - - Sets to be active (read to start profiling) - This should be called once a new MiniProfiler has been created. - - The profiler to set to active - If is null - - - - Stops the profiler and marks it as inactive. - - The profiler to stop - True if successful, false if Stop had previously been called on this profiler - If is null - - - - Calls to save the current - profiler using the current storage settings - - - - - - Common extension methods to use only in this project - - - - - Answers true if this String is either null or empty. - - - - - Answers true if this String is neither null or empty. - - - - - Removes trailing / characters from a path and leaves just one - - - - - Removes any leading / characters from a path - - - - - Removes any leading / characters from a path - - - - - Serializes to a json string. - - - - - Gets part of a stack trace containing only methods we care about. - - - - - Gets the current formatted and filted stack trace. - - Space separated list of methods - - - - Identifies users based on ip address. - - - - - Returns the paramter HttpRequest's client ip address. - - - - - A provider used to create instances and maintain the current instance. - - - - - Starts a new MiniProfiler and sets it to be current. By the end of this method - should return the new MiniProfiler. - - - - - Ends the current profiling session, if one exists. - - - When true, clears the for this HttpContext, allowing profiling to - be prematurely stopped and discarded. Useful for when a specific route does not need to be profiled. - - - - - Returns the current MiniProfiler. This is used by . - - - - - - Provides functionality to identify which user is profiling a request. - - - - - Returns a string to identify the user profiling the current 'request'. - - The current HttpRequest being profiled. - - - - A single MiniProfiler can be used to represent any number of steps/levels in a call-graph, via Step() - - Totally baller. - - - - Identifies this Profiler so it may be stored/cached. - - - - - A display name for this profiling session. - - - - - When this profiler was instantiated. - - - - - Where this profiler was run. - - - - - Allows filtering of steps based on what - the steps are created with. - - - - - The first that is created and started when this profiler is instantiated. - All other s will be children of this one. - - - - - A string identifying the user/client that is profiling this request. Set - with an -implementing class to provide a custom value. - - - If this is not set manually at some point, the implementation will be used; - by default, this will be the current request's ip address. - - - - - Returns true when this MiniProfiler has been viewed by the that recorded it. - - - Allows POSTs that result in a redirect to be profiled. implementation - will keep a list of all profilers that haven't been fetched down. - - - - - Starts when this profiler is instantiated. Each step will use this Stopwatch's current ticks as - their starting time. - - - - - For unit testing, returns the timer. - - - - - Milliseconds, to one decimal place, that this MiniProfiler ran. 
- - - - - Returns true when or any of its are . - - - - - Returns true when all child s are . - - - - - Any Timing step with a duration less than or equal to this will be hidden by default in the UI; defaults to 2.0 ms. - - - - - Ticks since this MiniProfiler was started. - - - - - Json representing the collection of CustomTimings relating to this Profiler - - - Is used when storing the Profiler in SqlStorage - - - - - Points to the currently executing Timing. - - - - - Creates and starts a new MiniProfiler for the root , filtering steps to . - - - - - Returns the 's and this profiler recorded. - - - - - Returns true if Ids match. - - - - - Returns hashcode of Id. - - - - - Obsolete - used for serialization. - - - - - Walks the hierarchy contained in this profiler, starting with , and returns each Timing found. - - - - - Returns milliseconds based on Stopwatch's Frequency. - - - - - Starts a new MiniProfiler based on the current . This new profiler can be accessed by - - - - - - Ends the current profiling session, if one exists. - - - When true, clears the for this HttpContext, allowing profiling to - be prematurely stopped and discarded. Useful for when a specific route does not need to be profiled. - - - - - Returns an that will time the code between its creation and disposal. Use this method when you - do not wish to include the MvcMiniProfiler namespace for the extension method. - - A descriptive name for the code that is encapsulated by the resulting IDisposable's lifetime. - This step's visibility level; allows filtering when is called. - - - - Returns the css and javascript includes needed to display the MiniProfiler results UI. - - Which side of the page the profiler popup button should be displayed on (defaults to left) - Whether to show trivial timings by default (defaults to false) - Whether to show time the time with children column by default (defaults to false) - The maximum number of trace popups to show before removing the oldest (defaults to 15) - xhtml rendering mode, ensure script tag is closed ... etc - when true, shows buttons to minimize and clear MiniProfiler results - Script and link elements normally; an empty string when there is no active profiling session. - - - - Gets the currently running MiniProfiler for the current HttpContext; null if no MiniProfiler was ed. - - - - - Renders the current to json. - - - - - Renders the parameter to json. - - - - - Deserializes the json string parameter to a . - - - - - Create a DEEP clone of this object - - - - - - Contains information about queries executed during this profiling session. - - - - - Returns all currently open commands on this connection - - - - - Milliseconds, to one decimal place, that this MiniProfiler was executing sql. - - - - - Returns true when we have profiled queries. - - - - - Returns true when any child Timings have duplicate queries. - - - - - How many sql data readers were executed in all steps. - - - - - How many sql scalar queries were executed in all steps. - - - - - How many sql non-query statements were executed in all steps. - - - - - Returns all results contained in all child steps. - - - - - Contains any sql statements that are executed, along with how many times those statements are executed. - - - - - Adds to the current . - - - - - Returns the number of sql statements of that were executed in all s. - - - - - Various configuration properties. - - - - - Assemblies to exclude from the stack trace report. - - - - - Types to exclude from the stack trace report. 
- - - - - Methods to exclude from the stack trace report. - - - - - Excludes the specified assembly from the stack trace output. - - The short name of the assembly. AssemblyName.Name - - - - Excludes the specified type from the stack trace output. - - The System.Type name to exclude - - - - Excludes the specified method name from the stack trace output. - - The name of the method - - - - The max length of the stack string to report back; defaults to 120 chars. - - - - - Any Timing step with a duration less than or equal to this will be hidden by default in the UI; defaults to 2.0 ms. - - - - - Dictates if the "time with children" column is displayed by default, defaults to false. - For a per-page override you can use .RenderIncludes(showTimeWithChildren: true/false) - - - - - Dictates if trivial timings are displayed by default, defaults to false. - For a per-page override you can use .RenderIncludes(showTrivial: true/false) - - - - - Determines how many traces to show before removing the oldest; defaults to 15. - For a per-page override you can use .RenderIncludes(maxTracesToShow: 10) - - - - - Dictates on which side of the page the profiler popup button is displayed; defaults to left. - For a per-page override you can use .RenderIncludes(position: RenderPosition.Left/Right) - - - - - Determines if min-max, clear, etc are rendered; defaults to false. - For a per-page override you can use .RenderIncludes(showControls: true/false) - - - - - By default, SqlTimings will grab a stack trace to help locate where queries are being executed. - When this setting is true, no stack trace will be collected, possibly improving profiler performance. - - - - - When is called, if the current request url contains any items in this property, - no profiler will be instantiated and no results will be displayed. - Default value is { "/ssr-", "/content/", "/scripts/", "/favicon.ico" }. - - - - - The path under which ALL routes are registered in, defaults to the application root. For example, "~/myDirectory/" would yield - "/myDirectory/ssr-includes.js" rather than just "/mini-profiler-includes.js" - Any setting here should be in APP RELATIVE FORM, e.g. "~/myDirectory/" - - - - - Understands how to save and load MiniProfilers. Used for caching between when - a profiling session ends and results can be fetched to the client, and for showing shared, full-page results. - - - The normal profiling session life-cycle is as follows: - 1) request begins - 2) profiler is started - 3) normal page/controller/request execution - 4) profiler is stopped - 5) profiler is cached with 's implementation of - 6) request ends - 7) page is displayed and profiling results are ajax-fetched down, pulling cached results from - 's implementation of - - - - - The formatter applied to the SQL being rendered (used only for UI) - - - - - Assembly version of this dank MiniProfiler. - - - - - The provider used to provider the current instance of a provider - This is also - - - - - A function that determines who can access the MiniProfiler results url. It should return true when - the request client has access, false for a 401 to be returned. HttpRequest parameter is the current request and - MiniProfiler parameter is the results that were profiled. - - - Both the HttpRequest and MiniProfiler parameters that will be passed into this function should never be null. - - - - - Make sure we can at least store profiler results to the http runtime cache. - - - - - Allows switching out stopwatches for unit testing. 
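The profiling members documented above (the static Start/Stop pair, the IDisposable-returning Step, RenderIncludes and the Settings that tune them) are normally used together. As a rough sketch only, assuming the MvcMiniProfiler-style names these fragments describe (exact signatures in the bundled fork may differ):

    // Hedged sketch: MiniProfiler.Start()/Stop() and the Step() extension are assumed
    // from the descriptions above; they are not verified against this fork's sources.
    var profiler = MiniProfiler.Start();
    using (profiler.Step("Load customers"))
    {
        // timed work goes here, e.g. a query issued through a ProfiledDbConnection
    }
    MiniProfiler.Stop(); // pass discardResults: true to throw the session away
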
- - - - - Categorizes individual steps to allow filtering. - - - - - Default level given to Timings. - - - - - Useful when profiling many items in a loop, but you don't wish to always see this detail. - - - - - Dictates on which side of the page the profiler popup button is displayed; defaults to left. - - - - - Profiler popup button is displayed on the left. - - - - - Profiler popup button is displayed on the right. - - - - - Contains helper methods that ease working with null s. - - - - - Wraps in a call and executes it, returning its result. - - The current profiling session or null. - Method to execute and profile. - The step name used to label the profiler results. - - - - - Returns an that will time the code between its creation and disposal. - - The current profiling session or null. - A descriptive name for the code that is encapsulated by the resulting IDisposable's lifetime. - This step's visibility level; allows filtering when is called. - - - - Adds 's hierarchy to this profiler's current Timing step, - allowing other threads, remote calls, etc. to be profiled and joined into this profiling session. - - - - - Returns an html-encoded string with a text-representation of ; returns "" when profiler is null. - - The current profiling session or null. - - - - Formats any SQL query with inline parameters, optionally including the value type - - - - - Creates a new Inline SQL Formatter, optionally including the parameter type info in comments beside the replaced value - - whether to include a comment after the value, indicating the type, e.g. /* @myParam DbType.Int32 */ - - - - Formats the SQL in a generic frieldly format, including the parameter type information in a comment if it was specified in the InlineFormatter constructor - - The SqlTiming to format - A formatted SQL string - - - - Returns a string representation of the parameter's value, including the type - - The parameter to get a value for - - - - - Takes a SqlTiming and returns a formatted SQL string, for parameter replacement, etc. - - - - - Return SQL the way you want it to look on the in the trace. Usually used to format parameters - - - Formatted SQL - - - - NOT IMPLEMENTED - will format statements with paramters in an Oracle friendly way - - - - - Does NOTHING, implement me! - - - - - Formats SQL server queries with a DECLARE up top for parameter values - - - - - Formats the SQL in a SQL-Server friendly way, with DECLARE statements for the parameters up top. - - The SqlTiming to format - A formatted SQL string - - - - Contains helper code to time sql statements. - - - - - The profiling session this SqlProfiler is part of. - - - - - Returns a new SqlProfiler to be used in the 'profiler' session. - - - - - Tracks when 'command' is started. - - - - - Returns all currently open commands on this connection - - - - - Finishes profiling for 'command', recording durations. - - - - - Called when 'reader' finishes its iterations and is closed. - - - - - Helper methods that allow operation on SqlProfilers, regardless of their instantiation. - - - - - Tracks when 'command' is started. - - - - - Finishes profiling for 'command', recording durations. - - - - - Called when 'reader' finishes its iterations and is closed. - - - - - Profiles a single sql execution. - - - - - Unique identifier for this SqlTiming. - - - - - Category of sql statement executed. - - - - - The sql that was executed. 
- - - - - The command string with special formatting applied based on MiniProfiler.Settings.SqlFormatter - - - - - Roughly where in the calling code that this sql was executed. - - - - - Offset from main MiniProfiler start that this sql began. - - - - - How long this sql statement took to execute. - - - - - When executing readers, how long it took to come back initially from the database, - before all records are fetched and reader is closed. - - - - - Stores any parameter names and values used by the profiled DbCommand. - - - - - Id of the Timing this statement was executed in. - - - Needed for database deserialization. - - - - - The Timing step that this sql execution occurred in. - - - - - True when other identical sql statements have been executed during this MiniProfiler session. - - - - - Creates a new SqlTiming to profile 'command'. - - - - - Obsolete - used for serialization. - - - - - Returns a snippet of the sql command and the duration. - - - - - Returns true if Ids match. - - - - - Returns hashcode of Id. - - - - - Called when command execution is finished to determine this SqlTiming's duration. - - - - - Called when database reader is closed, ending profiling for SqlTimings. - - - - - To help with display, put some space around sammiched commas - - - - - Information about a DbParameter used in the sql statement profiled by SqlTiming. - - - - - Which SqlTiming this Parameter was executed with. - - - - - Parameter name, e.g. "@routeName" - - - - - The value submitted to the database. - - - - - System.Data.DbType, e.g. "String", "Bit" - - - - - How large the type is, e.g. for string, size could be 4000 - - - - - Returns true if this has the same parent , and as . - - - - - Returns the XOR of certain properties. - - - - - Understands how to save MiniProfiler results to a MSSQL database, allowing more permanent storage and - querying of slow results. - - - - - How we connect to the database used to save/load MiniProfiler results. - - - - - Returns a new SqlServerDatabaseStorage object that will insert into the database identified by connectionString. - - - - - Saves 'profiler' to a database under its . - - - - - Returns the MiniProfiler identified by 'id' from the database or null when no MiniProfiler exists under that 'id'. - - - - - Returns a list of s that haven't been seen by . - - User identified by the current . - - - - Returns a DbConnection for your specific provider. - - - - - Returns a DbConnection already opened for execution. - - - - - Understands how to store a to the with absolute expiration. - - - - - The string that prefixes all keys that MiniProfilers are saved under, e.g. - "mini-profiler-ecfb0050-7ce8-4bf1-bf82-2cb38e90e31e". - - - - - How long to cache each for (i.e. the absolute expiration parameter of - ) - - - - - Returns a new HttpRuntimeCacheStorage class that will cache MiniProfilers for the specified duration. - - - - - Saves to the HttpRuntime.Cache under a key concated with - and the parameter's . - - - - - Returns the saved identified by . Also marks the resulting - profiler to true. - - - - - Returns a list of s that haven't been seen by . - - User identified by the current . - - - - Syncs access to runtime cache when adding a new list of ids for a user. - - - - - Provides saving and loading s to a storage medium. - - - - - Stores under its . - - The results of a profiling session. - - Should also ensure the profiler is stored as being unviewed by its profiling . - - - - - Returns a from storage based on , which should map to . 
- - - Should also update that the resulting profiler has been marked as viewed by its profiling . - - - - - Returns a list of s that haven't been seen by . - - User identified by the current . - - - - An individual profiling step that can contain child steps. - - - - - Unique identifer for this timing; set during construction. - - - - - Text displayed when this Timing is rendered. - - - - - How long this Timing step took in ms; includes any Timings' durations. - - - - - The offset from the start of profiling. - - - - - All sub-steps that occur within this Timing step. Add new children through - - - - - Stores arbitrary key/value strings on this Timing step. Add new tuples through . - - - - - Any queries that occurred during this Timing step. - - - - - Needed for database deserialization and JSON serialization. - - - - - Which Timing this Timing is under - the duration that this step takes will be added to its parent's duration. - - This will be null for the root (initial) Timing. - - - - Rebuilds all the parent timings on deserialization calls - - - - - Gets the elapsed milliseconds in this step without any children's durations. - - - - - Gets the aggregate elapsed milliseconds of all SqlTimings executed in this Timing, excluding Children Timings. - - - - - Returns true when this is less than the configured - , by default 2.0 ms. - - - - - Reference to the containing profiler, allowing this Timing to affect the Head and get Stopwatch readings. - - - - - Offset from parent MiniProfiler's creation that this Timing was created. - - - - - Returns true when this Timing has inner Timing steps. - - - - - Returns true if this Timing step collected sql execution timings. - - - - - Returns true if any s executed in this step are detected as duplicate statements. - - - - - Returns true when this Timing is the first one created in a MiniProfiler session. - - - - - How far away this Timing is from the Profiler's Root. - - - - - How many sql data readers were executed in this Timing step. Does not include queries in any child Timings. - - - - - How many sql scalar queries were executed in this Timing step. Does not include queries in any child Timings. - - - - - How many sql non-query statements were executed in this Timing step. Does not include queries in any child Timings. - - - - - Creates a new Timing named 'name' in the 'profiler's session, with 'parent' as this Timing's immediate ancestor. - - - - - Obsolete - used for serialization. - - - - - Returns this Timing's Name. - - - - - Returns true if Ids match. - - - - - Returns hashcode of Id. - - - - - Adds arbitrary string 'value' under 'key', allowing custom properties to be stored in this Timing step. - - - - - Completes this Timing's duration and sets the MiniProfiler's Head up one level. - - - - - Add the parameter 'timing' to this Timing's Children collection. - - - Used outside this assembly for custom deserialization when creating an implementation. - - - - - Adds the parameter 'sqlTiming' to this Timing's SqlTimings collection. - - A sql statement profiling that was executed in this Timing step. - - Used outside this assembly for custom deserialization when creating an implementation. - - - - - Returns the number of sql statements of that were executed in this . - - - - - Understands how to route and respond to MiniProfiler UI urls. - - - - - Try to keep everything static so we can easily be reused. - - - - - Returns either includes' css/javascript or results' html. - - - - - Handles rendering static content files. 
- - - - - Handles rendering a previous MiniProfiler session, identified by its "?id=GUID" on the query. - - - - - Embedded resource contents keyed by filename. - - - - - Helper method that sets a proper 404 response code. - - - - - HttpContext based profiler provider. This is the default provider to use in a web context. - The current profiler is associated with a HttpContext.Current ensuring that profilers are - specific to a individual HttpRequest. - - - - - Public constructor. This also registers any UI routes needed to display results - - - - - Starts a new MiniProfiler and associates it with the current . - - - - - Ends the current profiling session, if one exists. - - - When true, clears the for this HttpContext, allowing profiling to - be prematurely stopped and discarded. Useful for when a specific route does not need to be profiled. - - - - - Makes sure 'profiler' has a Name, pulling it from route data or url. - - - - - Returns the current profiler - - - - - - Gets the currently running MiniProfiler for the current HttpContext; null if no MiniProfiler was ed. - - - - - WebRequestProfilerProvider specific configurations - - - - - Provides user identification for a given profiling request. - - - - - Would've preferred to use [assembly: ContractNamespace] attribute but it is not supported in Mono - - - - - More familiar name for the new crowd. - - - - - The tier lets you specify a retrieving a setting with the tier prefix first before falling back to the original key. - E.g a tier of 'Live' looks for 'Live.{Key}' or if not found falls back to '{Key}'. - - - - - Returns string if exists, otherwise null - - - - - - - Gets the nullable app setting. - - - - - Gets the app setting. - - - - - Determines wheter the Config section identified by the sectionName exists. - - - - - Returns AppSetting[key] if exists otherwise defaultValue - - - - - Returns AppSetting[key] if exists otherwise defaultValue, for non-string values - - - - - Gets the connection string setting. - - - - - Gets the connection string. - - - - - Gets the list from app setting. - - - - - Gets the dictionary from app setting. - - - - - Get the static Parse(string) method on the type supplied - - - - - Gets the constructor info for T(string) if exists. - - - - - Returns the value returned by the 'T.Parse(string)' method if exists otherwise 'new T(string)'. - e.g. if T was a TimeSpan it will return TimeSpan.Parse(textValue). - If there is no Parse Method it will attempt to create a new instance of the destined type - - - - - Provides a common interface for Settings providers such as - ConfigurationManager or Azure's RoleEnvironment. The only - requirement is that if the implementation cannot find the - specified key, the return value must be null - - The key for the setting - The string value of the specified key, or null if the key - was invalid - - - - Return all keys in this configuration source. - - - - - - Predefined pattern that matches <?php ... ?> tags. - Could be passed inside a list to {@link #setPreservePatterns(List) setPreservePatterns} method. - - - Predefined pattern that matches <% ... %> tags. - Could be passed inside a list to {@link #setPreservePatterns(List) setPreservePatterns} method. - - - Predefined pattern that matches <--# ... --> tags. - Could be passed inside a list to {@link #setPreservePatterns(List) setPreservePatterns} method. - - - Predefined list of tags that are very likely to be block-level. - Could be passed to {@link #setRemoveSurroundingSpaces(string) setRemoveSurroundingSpaces} method. 
- - - Predefined list of tags that are block-level by default, excluding <div> and <li> tags. - Table tags are also included. - Could be passed to {@link #setRemoveSurroundingSpaces(string) setRemoveSurroundingSpaces} method. - - - Could be passed to {@link #setRemoveSurroundingSpaces(string) setRemoveSurroundingSpaces} method - to remove all surrounding spaces (not recommended). - - - If set to false all compression will be bypassed. Might be useful for testing purposes. - Default is true. - - @param enabled set false to bypass all compression - - - Gets or Sets JavaScript compressor implementation that will be used - to compress inline JavaScript in HTML. - - - Returns CSS compressor implementation that will be used - to compress inline CSS in HTML. - - - If set to true all HTML comments will be removed. - Default is true. - - @param removeComments set true to remove all HTML comments - - - If set to true all multiple whitespace characters will be replaced with single spaces. - Default is true. - - @param removeMultiSpaces set true to replace all multiple whitespace characters - will single spaces. - - - - - Enables JavaScript compression within <script> tags - if set to true. Default is false for performance reasons. - -
Note: Compressing JavaScript is not recommended if pages are compressed dynamically on-the-fly because of performance impact. You should consider putting JavaScript into a separate file and compressing it using standalone YUICompressor for example.

 - @param compressJavaScript set true to enable JavaScript compression. Default is false

 - Enables CSS compression within <style> tags using Yahoo YUI ICompressor if set to true. Default is false for performance reasons.

Note: Compressing CSS is not recommended if pages are compressed dynamically on-the-fly because of performance impact. You should consider putting CSS into a separate file and compressing it using standalone YUICompressor for example.

 - @param compressCss set true to enable CSS compression. Default is false
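Taken together, the settings above feed a single compress(html) call described further below. A minimal sketch, assuming the C# port exposes these options as settable properties and a Compress(string) method; the original Java API uses setXxx methods, so the exact member names here are an assumption:

    // Hedged sketch: property and method names are assumed from the @param names above.
    string rawHtml = "<html><!-- comment --><body>  Hello  </body></html>";
    var compressor = new HtmlCompressor();
    compressor.RemoveComments = true;       // strip HTML comments (default is true)
    compressor.CompressJavaScript = false;  // off by default for performance
    compressor.CompressCss = false;         // off by default for performance
    string compressedHtml = compressor.Compress(rawHtml);
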
- - If set to true, existing DOCTYPE declaration will be replaced with simple <!DOCTYPE html> declaration. - Default is false. - - @param simpleDoctype set true to replace existing DOCTYPE declaration with <!DOCTYPE html> - - - - If set to true, type="text/style" attributes will be removed from <style> tags. Default is false. - - @param removeStyleAttributes set true to remove type="text/style" attributes from <style> tags - - - - If set to true, method="get" attributes will be removed from <form> tags. Default is false. - - @param removeFormAttributes set true to remove method="get" attributes from <form> tags - - - If set to true, type="text" attributes will be removed from <input> tags. Default is false. - - @param removeInputAttributes set true to remove type="text" attributes from <input> tags - - - - - - - - - - Returns {@link HtmlCompressorStatistics} object containing statistics of the last HTML compression, if enabled. - Should be called after {@link #compress(string)} - - @return {@link HtmlCompressorStatistics} object containing last HTML compression statistics - - @see HtmlCompressorStatistics - @see #setGenerateStatistics(bool) - - - The main method that compresses given HTML source and returns compressed - result. - - @param html HTML content to compress - @return compressed content. - - - Returns metrics of an uncompressed document - - @return metrics of an uncompressed document - @see HtmlMetrics - - - Returns metrics of a compressed document - - @return metrics of a compressed document - @see HtmlMetrics - - - - Returns total size of blocks that were skipped by the compressor - (for example content inside <pre> tags or inside - <script> tags with disabled javascript compression) - - @return the total size of blocks that were skipped by the compressor, in bytes - - - Returns total filesize of a document - - @return total filesize of a document, in bytes - - - Returns number of empty characters (spaces, tabs, end of lines) in a document - - @return number of empty characters in a document - - - Returns total size of inline <script> tags - - @return total size of inline <script> tags, in bytes - - - Returns total size of inline <style> tags - - @return total size of inline <style> tags, in bytes - - - Returns total size of inline event handlers (onclick, etc) - - @return total size of inline event handlers, in bytes - - - - Provides access to the anti-forgery system, which provides protection against - Cross-site Request Forgery (XSRF, also called CSRF) attacks. - - - - - Generates an anti-forgery token for this request. This token can - be validated by calling the Validate() method. - - An HTML string corresponding to an <input type="hidden"> - element. This element should be put inside a <form>. - - This method has a side effect: it may set a response cookie. - - - - - Generates an anti-forgery token pair (cookie and form token) for this request. - This method is similar to GetHtml(), but this method gives the caller control - over how to persist the returned values. To validate these tokens, call the - appropriate overload of Validate. - - The anti-forgery token - if any - that already existed - for this request. May be null. The anti-forgery system will try to reuse this cookie - value when generating a matching form token. - Will contain a new cookie value if the old cookie token - was null or invalid. 
If this value is non-null when the method completes, the caller - must persist this value in the form of a response cookie, and the existing cookie value - should be discarded. If this value is null when the method completes, the existing - cookie value was valid and needn't be modified. - The value that should be stored in the <form>. The caller - should take care not to accidentally swap the cookie and form tokens. - - Unlike the GetHtml() method, this method has no side effect. The caller - is responsible for setting the response cookie and injecting the returned - form token as appropriate. - - - - - Validates an anti-forgery token that was supplied for this request. - The anti-forgery token may be generated by calling GetHtml(). - - - Throws an HttpAntiForgeryException if validation fails. - - - - - Validates an anti-forgery token pair that was generated by the GetTokens method. - - The token that was supplied in the request cookie. - The token that was supplied in the request form body. - - Throws an HttpAntiForgeryException if validation fails. - - - - - Provides programmatic configuration for the anti-forgery token system. - - - - - Specifies an object that can provide additional data to put into all - generated tokens and that can validate additional data in incoming - tokens. - - - - - Specifies the name of the cookie that is used by the anti-forgery - system. - - - If an explicit name is not provided, the system will automatically - generate a name. - - - - - Specifies whether SSL is required for the anti-forgery system - to operate. If this setting is 'true' and a non-SSL request - comes into the system, all anti-forgery APIs will fail. - - - - - Specifies whether the anti-forgery system should skip checking - for conditions that might indicate misuse of the system. Please - use caution when setting this switch, as improper use could open - security holes in the application. - - - Setting this switch will disable several checks, including: - - Identity.IsAuthenticated = true without Identity.Name being set - - special-casing claims-based identities - - - - - If claims-based authorization is in use, specifies the claim - type from the identity that is used to uniquely identify the - user. If this property is set, all claims-based identities - must return unique values for this claim type. - - - If claims-based authorization is in use and this property has - not been set, the anti-forgery system will automatically look - for claim types "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/nameidentifier" - and "http://schemas.microsoft.com/accesscontrolservice/2010/07/claims/identityprovider". - - - - - Allows providing or validating additional custom data for anti-forgery tokens. - For example, the developer could use this to supply a nonce when the token is - generated, then he could validate the nonce when the token is validated. - - - The anti-forgery system already embeds the client's username within the - generated tokens. This interface provides and consumes supplemental - data. If an incoming anti-forgery token contains supplemental data but no - additional data provider is configured, the supplemental data will not be - validated. - - - - - Provides additional data to be stored for the anti-forgery tokens generated - during this request. - - Information about the current request. - Supplemental data to embed within the anti-forgery token. - - - - Validates additional data that was embedded inside an incoming anti-forgery - token. 
- - Information about the current request. - Supplemental data that was embedded within the token. - True if the data is valid; false if the data is invalid. - - - - Initializes a new instance of the class. - - The base scope. - - The dictionary to use as a storage. Since the dictionary would be used as-is, we expect the implementer to - use the same key-value comparison logic as we do here. - - - - - Custom comparer for the context dictionaries - The comparer treats strings as a special case, performing case insesitive comparison. - This guaratees that we remain consistent throughout the chain of contexts since PageData dictionary - behaves in this manner. - - - - - Typed UserSession - - - - - Dynamic Session Bag - - - - - Redirect to the https:// version of this url if not already. - - - - - Don't redirect when in DebugMode - - - - - Don't redirect if the request was a forwarded request, e.g. from a Load Balancer - - - - - Encapsulates creating a new message handler - - - - - Processes all messages in a Normal and Priority Queue. - Expects to be called in 1 thread. i.e. Non Thread-Safe. - - - - - - A convenient repository base class you can inherit from to reduce the boilerplate - with accessing a managed IDbConnection - - - - - A convenient base class for your injected service dependencies that reduces the boilerplate - with managed access to ServiceStack's built-in providers - - - - - Only generate specified Verb entries for "ANY" routes - - - - - Tell ServiceStack to use ThreadStatic Items Collection for RequestScoped items. - Warning: ThreadStatic Items aren't pinned to the same request in async services which callback on different threads. - - - - - Gets a list of items for this request. - - This list will be cleared on every request and is specific to the original thread that is handling the request. - If a handler uses additional threads, this data will not be available on those threads. - - - - - Track any IDisposable's to dispose of at the end of the request in IAppHost.OnEndRequest() - - - - - - Release currently registered dependencies for this request - - true if any dependencies were released - - - - Respond with a 'Soft redirect' so smart clients (e.g. ajax) have access to the response and - can decide whether or not they should redirect - - - - - Decorate the response with an additional client-side event to instruct participating - smart clients (e.g. ajax) with hints to transparently invoke client-side functionality - - - - - Shortcut to get the ResponseDTO whether it's bare or inside a IHttpResult - - - - - - - Alias of AsDto - - - - - Shortcut to get the ResponseDTO whether it's bare or inside a IHttpResult - - - TResponse if found; otherwise null - - - - Alias of AsDto - - - - - Whether the response is an IHttpError or Exception - - - - - rangeHeader should be of the format "bytes=0-" or "bytes=0-12345" or "bytes=123-456" - - - - - Adds 206 PartialContent Status, Content-Range and Content-Length headers - - - - - Writes partial range as specified by start-end, from fromStream to toStream. 
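The range-handling members just described ("bytes=start-end" headers, the 206 PartialContent response, and writing a partial range from one stream to another) fit together roughly as follows. This is an illustrative, self-contained sketch using only BCL types, not the library's implementation; ParseRange and WritePartial are hypothetical helpers:

    using System;
    using System.IO;

    static class RangeSketch
    {
        // Hypothetical helper: parses "bytes=start-end"; an omitted end means end-of-content.
        public static (long Start, long End) ParseRange(string rangeHeader, long totalLength)
        {
            var parts = rangeHeader.Substring("bytes=".Length).Split('-');
            long start = long.Parse(parts[0]);
            long end = parts.Length > 1 && parts[1] != "" ? long.Parse(parts[1]) : totalLength - 1;
            return (start, end);
        }

        // Copies the inclusive [start, end] slice of fromStream into toStream.
        public static void WritePartial(Stream fromStream, Stream toStream, long start, long end)
        {
            fromStream.Seek(start, SeekOrigin.Begin);
            var buffer = new byte[8192];
            long remaining = end - start + 1;
            int read;
            while (remaining > 0 &&
                   (read = fromStream.Read(buffer, 0, (int)Math.Min(buffer.Length, remaining))) > 0)
            {
                toStream.Write(buffer, 0, read);
                remaining -= read;
            }
        }
    }

A caller honouring a request's Range header would pair this with a 206 status and matching Content-Range/Content-Length headers, as the notes above describe.
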
- - - - - Service error logs are kept in 'urn:ServiceErrors:{ServiceName}' - - - - - Combined service error logs are maintained in 'urn:ServiceErrors:All' - - - - - RequestLogs service Route, default is /requestlogs - - - - - Turn On/Off Session Tracking - - - - - Turn On/Off Logging of Raw Request Body, default is Off - - - - - Turn On/Off Tracking of Responses - - - - - Turn On/Off Tracking of Exceptions - - - - - Size of InMemoryRollingRequestLogger circular buffer - - - - - Limit access to /requestlogs service to these roles - - - - - Change the RequestLogger provider. Default is InMemoryRollingRequestLogger - - - - - Don't log requests of these types. By default RequestLog's are excluded - - - - - Don't log request bodys for services with sensitive information. - By default Auth and Registration requests are hidden. - - - - - Indicates that the request dto, which is associated with this attribute, - requires authentication. - - - - - Restrict authentication to a specific . - For example, if this attribute should only permit access - if the user is authenticated with , - you should set this property to . - - - - - Redirect the client to a specific URL if authentication failed. - If this property is null, simply `401 Unauthorized` is returned. - - - - - Enable the authentication feature and configure the AuthService. - - - - - Removes items from cache that have keys matching the specified wildcard pattern - - Cache client - The wildcard, where "*" means any sequence of characters and "?" means any single character. - - - - Removes items from the cache based on the specified regular expression pattern - - Cache client - Regular expression pattern to search cache keys - - - - Create new instance of CacheEntry. - - - - UTC time at which CacheEntry expires. - - - - Stores The value with key only if such key doesn't exist at the server yet. - - - - - Adds or replaces the value with key. - - - - - Adds or replaces the value with key. - - - - - Replace the value with specified key if it exists. - - - - - Add the value with key to the cache, set to never expire. - - - - - Add or replace the value with key to the cache, set to never expire. - - - - - Replace the value with key in the cache, set to never expire. - - - - - Add the value with key to the cache, set to expire at specified DateTime. - - This method examines the DateTimeKind of expiresAt to determine if conversion to - universal time is needed. The version of Add that takes a TimeSpan expiration is faster - than using this method with a DateTime of Kind other than Utc, and is not affected by - ambiguous local time during daylight savings/standard time transition. - - - - Add or replace the value with key to the cache, set to expire at specified DateTime. - - This method examines the DateTimeKind of expiresAt to determine if conversion to - universal time is needed. The version of Set that takes a TimeSpan expiration is faster - than using this method with a DateTime of Kind other than Utc, and is not affected by - ambiguous local time during daylight savings/standard time transition. - - - - Replace the value with key in the cache, set to expire at specified DateTime. - - This method examines the DateTimeKind of expiresAt to determine if conversion to - universal time is needed. The version of Replace that takes a TimeSpan expiration is faster - than using this method with a DateTime of Kind other than Utc, and is not affected by - ambiguous local time during daylight savings/standard time transition. 
- - - - Add the value with key to the cache, set to expire after specified TimeSpan. - - - - - Add or replace the value with key to the cache, set to expire after specified TimeSpan. - - - - - Replace the value with key in the cache, set to expire after specified TimeSpan. - - - - - Plugin adds support for Cross-origin resource sharing (CORS, see http://www.w3.org/TR/access-control/). - CORS allows to access resources from different domain which usually forbidden by origin policy. - - - - - Represents a default constructor with Allow Origin equals to "*", Allowed GET, POST, PUT, DELETE, OPTIONS request and allowed "Content-Type" header. - - - - - Attribute marks that specific response class has support for Cross-origin resource sharing (CORS, see http://www.w3.org/TR/access-control/). CORS allows to access resources from different domain which usually forbidden by origin policy. - - - - - Represents a default constructor with Allow Origin equals to "*", Allowed GET, POST, PUT, DELETE, OPTIONS request and allowed "Content-Type" header. - - - - - Change the default HTML view or template used for the HTML response of this service - - - - - End a ServiceStack Request - - - - - End a ServiceStack Request - - - - - End a HttpHandler Request - - - - - End a HttpHandler Request - - - - - End an MQ Request - - - - - End a ServiceStack Request with no content - - - - - Resolve an alternate Web Service from ServiceStack's IOC container. - - - - - - - Enable the Registration feature and configure the RegistrationService. - - - - - Base class to create request filter attributes only for specific HTTP methods (GET, POST...) - - - - - Creates a new - - Defines when the filter should be executed - - - - This method is only executed if the HTTP method matches the property. - - The http request wrapper - The http response wrapper - The request DTO - - - - Create a ShallowCopy of this instance. - - - - - - Registers the type in the IoC container and - adds auto-wiring to the specified type. - - - - - - - Registers a named instance of type in the IoC container and - adds auto-wiring to the specified type. - - - - - - - Registers the type in the IoC container and - adds auto-wiring to the specified type. - The reuse scope is set to none (transient). - - - - - - Registers the type in the IoC container and - adds auto-wiring to the specified type. - The reuse scope is set to none (transient). - - - - - - Registers the types in the IoC container and - adds auto-wiring to the specified types. - The reuse scope is set to none (transient). - - - - - - Register a singleton instance as a runtime type - - - - - Indicates that the request dto, which is associated with this attribute, - can only execute, if the user has specific permissions. - - - - - Indicates that the request dto, which is associated with this attribute, - can only execute, if the user has specific roles. - - - - - Check all session is in all supplied roles otherwise a 401 HttpError is thrown - - - - - - - Indicates that the request dto, which is associated with this attribute, - can only execute, if the user has specific permissions. - - - - - Indicates that the request dto, which is associated with this attribute, - can only execute, if the user has any of the specified roles. - - - - - Check all session is in any supplied roles otherwise a 401 HttpError is thrown - - - - - - - Base class to create response filter attributes only for specific HTTP methods (GET, POST...) 
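The ICacheClient expiry overloads described above (never expire, expire at an absolute DateTime, expire after a TimeSpan) are exercised like this; a brief illustrative sketch, with MemoryCacheClient used only as a convenient in-memory implementation:

    // Illustrative use of the expiry overloads documented above.
    ICacheClient cache = new MemoryCacheClient();

    cache.Set("user:1", "Alice");                              // never expires
    cache.Set("user:2", "Bob", TimeSpan.FromMinutes(10));      // expires 10 minutes from now
    cache.Set("user:3", "Carol", DateTime.UtcNow.AddHours(1)); // UTC avoids the local-time
                                                               // conversion cost noted above
    cache.Remove("user:1");
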
- - - - - Creates a new - - Defines when the filter should be executed - - - - This method is only executed if the HTTP method matches the property. - - The http request wrapper - The http response wrapper - The response DTO - - - - Create a ShallowCopy of this instance. - - - - - - Generic + Useful IService base class - - - - - Cascading collection of virtual file sources, inc. Embedded Resources, File System, In Memory, S3 - - - - - Read/Write Virtual FileSystem. Defaults to FileSystemVirtualPathProvider - - - - - Dynamic Session Bag - - - - - Typed UserSession - - - - - If they don't have an ICacheClient configured use an In Memory one. - - - - - Lets you Register new Services and the optional restPaths will be registered against - this default Request Type - - - - - Retain the same behavior as ASP.NET and redirect requests to directores - without a trailing '/' - - - - - The AppHost.Container. Note: it is not thread safe to register dependencies after AppStart. - - - - - Executed immediately before a Service is executed. Use return to change the request DTO used, must be of the same type. - - - - - Executed immediately after a service is executed. Use return to change response used. - - - - - Occurs when the Service throws an Exception. - - - - - Occurs when an exception is thrown whilst processing a request. - - - - - Apply PreRequest Filters for participating Custom Handlers, e.g. RazorFormat, MarkdownFormat, etc - - - - - Apply PreAuthenticate Filters from IAuthWithRequest AuthProviders - - - - - Applies the raw request filters. Returns whether or not the request has been handled - and no more processing should be done. - - - - - - Applies the request filters. Returns whether or not the request has been handled - and no more processing should be done. - - - - - - Applies the response filters. Returns whether or not the request has been handled - and no more processing should be done. - - - - - - Inspect or modify ever new UserSession created or resolved from cache. - return null if Session is invalid to create new Session. - - - - - Naming convention for the ResponseStatus property name on the response DTO - - - - - Create an instance of the service response dto type and inject it with the supplied responseStatus - - - - - - - - - - - - - - - - - Override to provide additional/less context about the Service Exception. - By default the request is serialized and appended to the ResponseStatus StackTrace. - - - - - Changes the links for the servicestack/metadata page - - - - - Scans the supplied Assemblies to infer REST paths and HTTP verbs. - - The instance. - - The assemblies with REST services. - - The same instance; - never . - - - - Configure ServiceStack to have ISession support - - - - - Create the active Session or Permanent Session Id cookie. - - - - - - Create both Permanent and Session Id cookies and return the active sessionId - - - - - - This class interecepts 401 requests and changes them to 402 errors. When this happens the FormAuthentication module - will no longer hijack it and redirect back to login because it is a 402 error, not a 401. - When the request ends, this class sets the status code back to 401 and everything works as it should. - - PathToSupress is the path inside your website where the above swap should happen. - - If you can build for .net 4.5, you do not have to do this swap. 
[Removed file content, condensed: XML documentation comment text (member summaries, parameter and return descriptions) covering ServiceStack's validation feature, Markdown view pages and templates, the in-memory and cascading virtual file systems, AppHost registration, request/response filters and pipeline hooks, HTTP request and cookie extension helpers, and localized resource strings; the bundled SmartThreadPool (Amib.Threading) work item, work items group, work items queue, priority queue, and thread pool start-info types; and the Funq IoC container's Register, Resolve, TryResolve and LazyResolve overloads.]
- - - - - Attempts to resolve the given service by type and name, passing the - given arguments arguments for its initialization. - Type of the service to retrieve.First argument to pass to the factory delegate that may create the instace.Second argument to pass to the factory delegate that may create the instace.Third argument to pass to the factory delegate that may create the instace.Fourth argument to pass to the factory delegate that may create the instace. - The resolved service instance or if it cannot be resolved. - - - - - Attempts to resolve the given service by type and name, passing the - given arguments arguments for its initialization. - Type of the service to retrieve.First argument to pass to the factory delegate that may create the instace.Second argument to pass to the factory delegate that may create the instace.Third argument to pass to the factory delegate that may create the instace.Fourth argument to pass to the factory delegate that may create the instace.Fifth argument to pass to the factory delegate that may create the instace. - The resolved service instance or if it cannot be resolved. - - - - - Attempts to resolve the given service by type and name, passing the - given arguments arguments for its initialization. - Type of the service to retrieve.First argument to pass to the factory delegate that may create the instace.Second argument to pass to the factory delegate that may create the instace.Third argument to pass to the factory delegate that may create the instace.Fourth argument to pass to the factory delegate that may create the instace.Fifth argument to pass to the factory delegate that may create the instace.Sixth argument to pass to the factory delegate that may create the instace. - The resolved service instance or if it cannot be resolved. - - - - - Register an autowired dependency - - - - - - Register an autowired dependency - - Name of dependency - - - - - Register an autowired dependency as a separate type - - - - - - Register an autowired dependency as a separate type - - - - - - Alias for RegisterAutoWiredAs - - - - - - Alias for RegisterAutoWiredAs - - - - - - Auto-wires an existing instance, - ie all public properties are tried to be resolved. - - - - - - Generates a function which creates and auto-wires . - - - - - - - - Auto-wires an existing instance of a specific type. - The auto-wiring progress is also cached to be faster - when calling next time with the same type. - - - - - - Encapsulates a method that has five parameters and returns a value of the - type specified by the parameter. - - - - - Encapsulates a method that has six parameters and returns a value of the - type specified by the parameter. - - - - - Encapsulates a method that has seven parameters and returns a value of the - type specified by the parameter. - - - - - Helper interface used to hide the base - members from the fluent API to make for much cleaner - Visual Studio intellisense experience. - - - - - - - - - - - - - - - - - Funqlets are a set of components provided as a package - to an existing container (like a module). - - - - - Configure the given container with the - registrations provided by the funqlet. - - Container to register. - - - - Interface used by plugins to contribute registrations - to an existing container. - - - - - Determines who is responsible for disposing instances - registered with a container. - - - - - Container should dispose provided instances when it is disposed. This is the - default. - - - - - Container does not dispose provided instances. 
- - - - - Default owner, which equals . - - - - - Exception thrown by the container when a service cannot be resolved. - - - - - Initializes the exception with the service that could not be resolved. - - - - - Initializes the exception with the service (and its name) that could not be resolved. - - - - - Initializes the exception with an arbitrary message. - - - - - Determines visibility and reuse of instances provided by the container. - - - - - Instances are reused within a container hierarchy. Instances - are created (if necessary) in the container where the registration - was performed, and are reused by all descendent containers. - - - - - Instances are reused only at the given container. Descendent - containers do not reuse parent container instances and get - a new instance at their level. - - - - - Each request to resolve the dependency will result in a new - instance being returned. - - - - - Instaces are reused within the given request - - - - - Default scope, which equals . - - - - - Ownership setting for the service. - - - - - Reuse scope setting for the service. - - - - - The container where the entry was registered. - - - - - Specifies the owner for instances, which determines how - they will be disposed. - - - - - Specifies the scope for instances, which determines - visibility of instances across containers and hierarchies. - - - - - The Func delegate that creates instances of the service. - - - - - The cached service instance if the scope is or - . - - - - - The Func delegate that initializes the object after creation. - - - - - Clones the service entry assigning the to the - . Does not copy the . - - - - - Fluent API for customizing the registration of a service. - - - - - Fluent API for customizing the registration of a service. - - - - - Fluent API that allows registering an initializer for the - service. - - - - - Specifies an initializer that should be invoked after - the service instance has been created by the factory. - - - - - Fluent API that exposes both - and owner (). - - - - - Fluent API that allows specifying the reuse instances. - - - - - Specifies how instances are reused within a container or hierarchy. Default - scope is . - - - - - Fluent API that allows specifying the owner of instances - created from a registration. - - - - - Specifies the owner of instances created from this registration. Default - owner is . - - - - - when true, (most) bare plain URLs are auto-hyperlinked - WARNING: this is a significant deviation from the markdown spec - - - - - when true, RETURN becomes a literal newline - WARNING: this is a significant deviation from the markdown spec - - - - - use ">" for HTML output, or " />" for XHTML output - - - - - when true, problematic URL characters like [, ], (, and so forth will be encoded - WARNING: this is a significant deviation from the markdown spec - - - - - when false, email addresses will never be auto-linked - WARNING: this is a significant deviation from the markdown spec - - - - - when true, bold and italic require non-word characters on either side - WARNING: this is a significant deviation from the markdown spec - - - - - Markdown is a text-to-HTML conversion tool for web writers. - Markdown allows you to write using an easy-to-read, easy-to-write plain text format, - then convert it to structurally valid XHTML (or HTML). - - - - - Create a new Markdown instance using default options - - - - - Create a new Markdown instance and optionally load options from a configuration - file. 
There they should be stored in the appSettings section, available options are: - - Markdown.StrictBoldItalic (true/false) - Markdown.EmptyElementSuffix (">" or " />" without the quotes) - Markdown.LinkEmails (true/false) - Markdown.AutoNewLines (true/false) - Markdown.AutoHyperlink (true/false) - Markdown.EncodeProblemUrlCharacters (true/false) - - - - - - Create a new Markdown instance and set the options from the MarkdownOptions object. - - - - - use ">" for HTML output, or " />" for XHTML output - - - - - when false, email addresses will never be auto-linked - WARNING: this is a significant deviation from the markdown spec - - - - - when true, bold and italic require non-word characters on either side - WARNING: this is a significant deviation from the markdown spec - - - - - when true, RETURN becomes a literal newline - WARNING: this is a significant deviation from the markdown spec - - - - - when true, (most) bare plain URLs are auto-hyperlinked - WARNING: this is a significant deviation from the markdown spec - - - - - when true, problematic URL characters like [, ], (, and so forth will be encoded - WARNING: this is a significant deviation from the markdown spec - - - - - maximum nested depth of [] and () supported by the transform; implementation detail - - - - - Tabs are automatically converted to spaces as part of the transform - this constant determines how "wide" those tabs become in spaces - - - - - In the static constuctor we'll initialize what stays the same across all transforms. - - - - - current version of MarkdownSharp; - see http://code.google.com/p/markdownsharp/ for the latest code or to contribute - - - - - Transforms the provided Markdown-formatted text to HTML; - see http://en.wikipedia.org/wiki/Markdown - - - The order in which other subs are called here is - essential. Link and image substitutions need to happen before - EscapeSpecialChars(), so that any *'s or _'s in the a - and img tags get encoded. - - - - - Perform transformations that form block-level tags like paragraphs, headers, and list items. - - - - - Perform transformations that occur *within* block-level tags like paragraphs, headers, and list items. - - - - - splits on two or more newlines, to form "paragraphs"; - each paragraph is then unhashed (if it is a hash and unhashing isn't turned off) or wrapped in HTML p tag - - - - - Reusable pattern to match balanced [brackets]. See Friedl's - "Mastering Regular Expressions", 2nd Ed., pp. 328-331. - - - - - Reusable pattern to match balanced (parens). See Friedl's - "Mastering Regular Expressions", 2nd Ed., pp. 328-331. - - - - - Strips link definitions from text, stores the URLs and titles in hash references. - - - ^[id]: url "optional title" - - - - - derived pretty much verbatim from PHP Markdown - - - - - replaces any block-level HTML blocks with hash entries - - - - - returns an array of HTML tokens comprising the input string. Each token is - either a tag (possibly with nested, tags contained therein, such - as <a href="<MTFoo>">, or a run of text between tags. Each element of the - array is a two-element array; the first is either 'tag' or 'text'; the second is - the actual value. - - - - - Turn Markdown link shortcuts into HTML anchor tags - - - [link text](url "title") - [link text][id] - [id] - - - - - Turn Markdown image shortcuts into HTML img tags. 
- - - ![alt text][id] - ![alt text](url "optional title") - - - - - Turn Markdown headers into HTML header tags - - - Header 1 - ======== - - Header 2 - -------- - - # Header 1 - ## Header 2 - ## Header 2 with closing hashes ## - ... - ###### Header 6 - - - - - Turn Markdown horizontal rules into HTML hr tags - - - *** - * * * - --- - - - - - - - - - Turn Markdown lists into HTML ul and ol and li tags - - - - - Process the contents of a single ordered or unordered list, splitting it - into individual list items. - - - - - /// Turn Markdown 4-space indented code into HTML pre code blocks - - - - - Turn Markdown `code spans` into HTML code tags - - - - - Turn Markdown *italics* and **bold** into HTML strong and em tags - - - - - Turn markdown line breaks (two space at end of line) into HTML break tags - - - - - Turn Markdown > quoted blocks into HTML blockquote blocks - - - - - Turn angle-delimited URLs into HTML anchor tags - - - <http://www.example.com> - - - - - Remove one level of line-leading spaces - - - - - encodes email address randomly - roughly 10% raw, 45% hex, 45% dec - note that @ is always encoded and : never is - - - - - Encode/escape certain Markdown characters inside code blocks and spans where they are literals - - - - - Encode any ampersands (that aren't part of an HTML entity) and left or right angle brackets - - - - - Encodes any escaped characters such as \`, \*, \[ etc - - - - - swap back in all the special characters we've hidden - - - - - escapes Bold [ * ] and Italic [ _ ] characters - - - - - hex-encodes some unusual "problem" chars in URLs to avoid URL detection problems - - - - - Within tags -- meaning between < and > -- encode [\ ` * _] so they - don't conflict with their use in Markdown for code, italics and strong. - We're replacing each such character with its corresponding hash - value; this is likely overkill, but it should prevent us from colliding - with the escape values by accident. 
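For context, the container API those removed docs describe is ServiceStack's bundled Funq container. A minimal registration/resolution sketch, using an illustrative Redis host string and lifetime choice (both are assumptions, not part of this change), might look like:

```csharp
using Funq;
using ServiceStack.Redis;

// Sketch of the Funq registration/resolution API summarised above.
// The Redis host string and ReuseScope choice are illustrative assumptions.
var container = new Container();

// Register a factory delegate; ReusedWithin controls the ReuseScope described in the removed docs.
container.Register<IRedisClientsManager>(c => new BasicRedisClientManager("localhost:6379"))
    .ReusedWithin(ReuseScope.Container);

var manager = container.Resolve<IRedisClientsManager>();         // throws ResolutionException if unregistered
var maybeManager = container.TryResolve<IRedisClientsManager>(); // returns null instead of throwing
```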
- - - - - convert all tabs to _tabWidth spaces; - standardizes line endings from DOS (CR LF) or Mac (CR) to UNIX (LF); - makes sure text ends with a couple of newlines; - removes any blank lines (only spaces) in the text - - - - - this is to emulate what's evailable in PHP - - - - diff --git a/lib/tests/ServiceStack.dll b/lib/tests/ServiceStack.dll deleted file mode 100644 index 61ae1732..00000000 Binary files a/lib/tests/ServiceStack.dll and /dev/null differ diff --git a/src/.nuget/NuGet.config b/src/.nuget/NuGet.config deleted file mode 100644 index 6ae40b3b..00000000 --- a/src/.nuget/NuGet.config +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/src/.nuget/NuGet.exe b/src/.nuget/NuGet.exe deleted file mode 100644 index ed2b0a22..00000000 Binary files a/src/.nuget/NuGet.exe and /dev/null differ diff --git a/src/.nuget/NuGet.targets b/src/.nuget/NuGet.targets deleted file mode 100644 index 38f6d8c2..00000000 --- a/src/.nuget/NuGet.targets +++ /dev/null @@ -1,77 +0,0 @@ - - - - $(MSBuildProjectDirectory)\..\ - - - - - $([System.IO.Path]::Combine($(SolutionDir), ".nuget")) - $([System.IO.Path]::Combine($(ProjectDir), "packages.config")) - $([System.IO.Path]::Combine($(SolutionDir), "packages")) - - - - - $(SolutionDir).nuget - packages.config - $(SolutionDir)packages - - - - - $(NuGetToolsPath)\NuGet.exe - "$(NuGetExePath)" - mono --runtime=v4.0.30319 $(NuGetExePath) - - $(TargetDir.Trim('\\')) - - - "" - - - false - - - false - - - $(NuGetCommand) install "$(PackagesConfig)" -source $(PackageSources) -o "$(PackagesDir)" - $(NuGetCommand) pack "$(ProjectPath)" -p Configuration=$(Configuration) -o "$(PackageOutputDir)" -symbols - - - - RestorePackages; - $(BuildDependsOn); - - - - - $(BuildDependsOn); - BuildPackage; - - - - - - - - - - - - - - - - - - - - diff --git a/src/Directory.Build.props b/src/Directory.Build.props new file mode 100644 index 00000000..2a6ba4e0 --- /dev/null +++ b/src/Directory.Build.props @@ -0,0 +1,54 @@ + + + + 6.0.3 + ServiceStack + ServiceStack, Inc. 
+ © 2008-2018 ServiceStack, Inc + true + https://github.com/ServiceStack/ServiceStack.Redis + https://servicestack.net/terms + https://servicestack.net/img/logo-64.png + https://docs.servicestack.net/release-notes-history + git + https://github.com/ServiceStack/ServiceStack.Redis.git + embedded + latest + true + true + false + + + + true + true + + + + $(DefineConstants);NETFX;NET45;NET472 + True + False + ../servicestack.snk + + + + $(DefineConstants);NETSTANDARD;NETSTANDARD2_0 + + + + $(DefineConstants);NET6_0;NET6_0_OR_GREATER + + + + $(DefineConstants);NETCORE;NETCORE_SUPPORT + + + + + + + + DEBUG + + + diff --git a/src/ServiceStack.Common/project.json b/src/ServiceStack.Common/project.json deleted file mode 100644 index eca0797e..00000000 --- a/src/ServiceStack.Common/project.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "configurations": { - "Debug": { - "buildOptions": { - "define": [ - "DEBUG", - "TRACE" - ] - } - }, - "Release": { - "buildOptions": { - "define": [ - "TRACE" - ], - "optimize": true - } - } - }, - "dependencies": { - "NETStandard.Library": "1.6.0", - "ServiceStack.Interfaces": "1.0.*", - "ServiceStack.Text": "1.0.*" - }, - "frameworks": { - "netstandard1.3": { - "buildOptions": { - "define": [ - "NETSTANDARD1_1" - ] - }, - "dependencies": {}, - "bin": { - "assembly": "../../lib/netcore/ServiceStack.Common.dll", - "pdb": "../../lib/netcore/ServiceStack.Common.pdb" - } - } - }, - "version": "1.0.0" -} \ No newline at end of file diff --git a/src/ServiceStack.Interfaces/project.json b/src/ServiceStack.Interfaces/project.json deleted file mode 100644 index 1e98e841..00000000 --- a/src/ServiceStack.Interfaces/project.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "configurations": { - "Debug": { - "buildOptions": { - "define": [ - "DEBUG", - "TRACE" - ] - } - }, - "Release": { - "buildOptions": { - "define": [ - "TRACE" - ], - "optimize": true - } - } - }, - "dependencies": { - "NETStandard.Library": "1.6.0" - }, - "frameworks": { - "netstandard1.1": { - "dependencies": {}, - "bin": { - "assembly": "../../lib/netcore/ServiceStack.Interfaces.dll", - "pdb": "../../lib/netcore/ServiceStack.Interfaces.pdb" - } - } - }, - "version": "1.0.0" -} \ No newline at end of file diff --git a/src/ServiceStack.Redis.NetCore/ServiceStack.Common/project.json b/src/ServiceStack.Redis.NetCore/ServiceStack.Common/project.json deleted file mode 100644 index 291579c1..00000000 --- a/src/ServiceStack.Redis.NetCore/ServiceStack.Common/project.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "configurations": { - "Debug": { - "buildOptions": { - "define": ["DEBUG", "TRACE"] - } - }, - "Release": { - "buildOptions": { - "define": ["TRACE"], - "optimize": true - } - } - }, - "dependencies": { - "NETStandard.Library": "1.6.0", - "ServiceStack.Interfaces" : "1.0.*", - "ServiceStack.Text" : "1.0.*" - }, - "frameworks": { - "netstandard1.3": { - "buildOptions" : { - "define": ["NETSTANDARD1_1"] - }, - "dependencies" : { - }, - "bin" : { "assembly":"../../../lib/netcore/ServiceStack.Common.dll", "pdb" : "../../../lib/netcore/ServiceStack.Common.pdb" } - } - - }, - "version": "1.0.0" -} \ No newline at end of file diff --git a/src/ServiceStack.Redis.NetCore/ServiceStack.Interfaces/project.json b/src/ServiceStack.Redis.NetCore/ServiceStack.Interfaces/project.json deleted file mode 100644 index 0cdca2a9..00000000 --- a/src/ServiceStack.Redis.NetCore/ServiceStack.Interfaces/project.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "configurations": { - "Debug": { - "buildOptions": { - "define": ["DEBUG", 
"TRACE"] - } - }, - "Release": { - "buildOptions": { - "define": ["TRACE"], - "optimize": true - } - } - }, - "dependencies": { - "NETStandard.Library": "1.6.0" - }, - "frameworks": { - "netstandard1.1": { - "dependencies" : { - }, - "bin" : { "assembly":"../../../lib/netcore/ServiceStack.Interfaces.dll", "pdb" : "../../../lib/netcore/ServiceStack.Interfaces.pdb" } - } - }, - "version": "1.0.0" -} \ No newline at end of file diff --git a/src/ServiceStack.Redis.NetCore/ServiceStack.Redis/project.json b/src/ServiceStack.Redis.NetCore/ServiceStack.Redis/project.json deleted file mode 100644 index df09b7e1..00000000 --- a/src/ServiceStack.Redis.NetCore/ServiceStack.Redis/project.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "configurations": { - "Debug": { - "buildOptions": { - "define": ["DEBUG", "TRACE"] - } - }, - "Release": { - "buildOptions": { - "define": ["TRACE"], - "optimize": true - } - } - }, - "dependencies": { - "NETStandard.Library": "1.6.0", - "ServiceStack.Interfaces" : "1.0.*", - "ServiceStack.Text" : "1.0.*", - "ServiceStack.Common" : "1.0.*" - }, - "compile": ["../../ServiceStack.Redis/**/*.cs"], - "frameworks": { - "netstandard1.3": { - "dependencies" : { - "System.Net.Security" : "4.0.0", - "System.Collections.Specialized": "4.0.1", - "System.Collections.NonGeneric": "4.0.1", - "System.Net.NameResolution" : "4.0.0" - } - } - - }, - "version": "1.0.0" -} \ No newline at end of file diff --git a/src/ServiceStack.Redis.NetCore/ServiceStack.Text/project.json b/src/ServiceStack.Redis.NetCore/ServiceStack.Text/project.json deleted file mode 100644 index 247c737d..00000000 --- a/src/ServiceStack.Redis.NetCore/ServiceStack.Text/project.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "configurations": { - "Debug": { - "buildOptions": { - "define": ["DEBUG", "TRACE"] - } - }, - "Release": { - "buildOptions": { - "define": ["TRACE"], - "optimize": true - } - } - }, - "dependencies": { - "NETStandard.Library": "1.6.0" - }, - "frameworks": { - "netstandard1.1": { - "dependencies" : { - }, - "bin" : { "assembly":"../../../lib/netcore/ServiceStack.Text.dll", "pdb" : "../../../lib/netcore/ServiceStack.Text.pdb" } - } - }, - "version": "1.0.0" -} \ No newline at end of file diff --git a/src/ServiceStack.Redis.sln b/src/ServiceStack.Redis.sln index 8e91447b..c2ea43b7 100644 --- a/src/ServiceStack.Redis.sln +++ b/src/ServiceStack.Redis.sln @@ -1,81 +1,103 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 2012 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29721.120 +MinimumVisualStudioVersion = 10.0.40219.1 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Build", "Build", "{38F69F8F-9303-4BAF-B081-D28339163E07}" ProjectSection(SolutionItems) = preProject - ..\build\appsettings.license.txt = ..\build\appsettings.license.txt - ..\build\build-sn.proj = ..\build\build-sn.proj + ..\build\build-core.proj = ..\build\build-core.proj ..\build\build.bat = ..\build\build.bat ..\build\build.proj = ..\build\build.proj ..\build\build.tasks = ..\build\build.tasks - ..\build\copy.bat = ..\build\copy.bat + Directory.Build.props = Directory.Build.props + ..\tests\Directory.Build.props = ..\tests\Directory.Build.props ..\README.md = ..\README.md - ..\NuGet\ServiceStack.Redis\servicestack.redis.nuspec = ..\NuGet\ServiceStack.Redis\servicestack.redis.nuspec - ..\NuGet.Signed\ServiceStack.Redis.Signed\servicestack.redis.signed.nuspec = ..\NuGet.Signed\ServiceStack.Redis.Signed\servicestack.redis.signed.nuspec + ServiceStack.Redis\ServiceStack.Redis.Core.csproj = 
ServiceStack.Redis\ServiceStack.Redis.Core.csproj EndProjectSection EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ServiceStack.Redis", "ServiceStack.Redis\ServiceStack.Redis.csproj", "{AF99F19B-4C04-4F58-81EF-B092F1FCC540}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ServiceStack.Redis", "ServiceStack.Redis\ServiceStack.Redis.csproj", "{AF99F19B-4C04-4F58-81EF-B092F1FCC540}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ServiceStack.Redis.Tests", "..\tests\ServiceStack.Redis.Tests\ServiceStack.Redis.Tests.csproj", "{951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ServiceStack.Redis.Tests", "..\tests\ServiceStack.Redis.Tests\ServiceStack.Redis.Tests.csproj", "{951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Console.Tests", "..\tests\Console.Tests\Console.Tests.csproj", "{8368C965-B4F6-4263-9ABB-731A175B2E77}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ServiceStack.Redis.Tests.Sentinel", "..\tests\ServiceStack.Redis.Tests.Sentinel\ServiceStack.Redis.Tests.Sentinel.csproj", "{91C55091-A946-49B5-9517-8794EBCC5784}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ServiceStack.Redis.Tests.Sentinel", "..\tests\ServiceStack.Redis.Tests.Sentinel\ServiceStack.Redis.Tests.Sentinel.csproj", "{91C55091-A946-49B5-9517-8794EBCC5784}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ServiceStack.Redis.Benchmark", "..\tests\ServiceStack.Redis.Benchmark\ServiceStack.Redis.Benchmark.csproj", "{959CA5FE-6525-4EEF-86CA-F4978BEFF14F}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Console.Tests", "..\tests\Console.Tests\Console.Tests.csproj", "{56DEDC64-B349-4150-BE9C-5805D831678D}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 Release|x86 = Release|x86 - Signed|Any CPU = Signed|Any CPU - Signed|x86 = Signed|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|Any CPU.Build.0 = Debug|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x86.ActiveCfg = Debug|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x86.Build.0 = Debug|x86 + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x64.ActiveCfg = Debug|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x64.Build.0 = Debug|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x86.ActiveCfg = Debug|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Debug|x86.Build.0 = Debug|Any CPU {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|Any CPU.ActiveCfg = Release|Any CPU {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|Any CPU.Build.0 = Release|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x86.ActiveCfg = Release|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x86.Build.0 = Release|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Signed|Any CPU.ActiveCfg = Signed|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Signed|Any CPU.Build.0 = Signed|Any CPU - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Signed|x86.ActiveCfg = Signed|x86 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Signed|x86.Build.0 = Signed|x86 + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x64.ActiveCfg = Release|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x64.Build.0 = 
Release|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x86.ActiveCfg = Release|Any CPU + {AF99F19B-4C04-4F58-81EF-B092F1FCC540}.Release|x86.Build.0 = Release|Any CPU {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|Any CPU.Build.0 = Debug|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x86.ActiveCfg = Debug|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x86.Build.0 = Debug|x86 + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x64.ActiveCfg = Debug|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x64.Build.0 = Debug|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x86.ActiveCfg = Debug|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Debug|x86.Build.0 = Debug|Any CPU {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|Any CPU.ActiveCfg = Release|Any CPU {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|Any CPU.Build.0 = Release|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x86.ActiveCfg = Release|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x86.Build.0 = Release|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Signed|Any CPU.ActiveCfg = Signed|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Signed|Any CPU.Build.0 = Signed|Any CPU - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Signed|x86.ActiveCfg = Signed|x86 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Signed|x86.Build.0 = Signed|x86 - {8368C965-B4F6-4263-9ABB-731A175B2E77}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {8368C965-B4F6-4263-9ABB-731A175B2E77}.Debug|Any CPU.Build.0 = Debug|Any CPU - {8368C965-B4F6-4263-9ABB-731A175B2E77}.Debug|x86.ActiveCfg = Debug|Any CPU - {8368C965-B4F6-4263-9ABB-731A175B2E77}.Release|Any CPU.ActiveCfg = Release|Any CPU - {8368C965-B4F6-4263-9ABB-731A175B2E77}.Release|Any CPU.Build.0 = Release|Any CPU - {8368C965-B4F6-4263-9ABB-731A175B2E77}.Release|x86.ActiveCfg = Release|Any CPU - {8368C965-B4F6-4263-9ABB-731A175B2E77}.Signed|Any CPU.ActiveCfg = Release|Any CPU - {8368C965-B4F6-4263-9ABB-731A175B2E77}.Signed|Any CPU.Build.0 = Release|Any CPU - {8368C965-B4F6-4263-9ABB-731A175B2E77}.Signed|x86.ActiveCfg = Release|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x64.ActiveCfg = Release|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x64.Build.0 = Release|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x86.ActiveCfg = Release|Any CPU + {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A}.Release|x86.Build.0 = Release|Any CPU {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|Any CPU.Build.0 = Debug|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|x64.ActiveCfg = Debug|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|x64.Build.0 = Debug|Any CPU {91C55091-A946-49B5-9517-8794EBCC5784}.Debug|x86.ActiveCfg = Debug|Any CPU {91C55091-A946-49B5-9517-8794EBCC5784}.Release|Any CPU.ActiveCfg = Release|Any CPU {91C55091-A946-49B5-9517-8794EBCC5784}.Release|Any CPU.Build.0 = Release|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Release|x64.ActiveCfg = Release|Any CPU + {91C55091-A946-49B5-9517-8794EBCC5784}.Release|x64.Build.0 = Release|Any CPU {91C55091-A946-49B5-9517-8794EBCC5784}.Release|x86.ActiveCfg = Release|Any CPU - {91C55091-A946-49B5-9517-8794EBCC5784}.Signed|Any CPU.ActiveCfg = Release|Any CPU - {91C55091-A946-49B5-9517-8794EBCC5784}.Signed|Any CPU.Build.0 = Release|Any CPU - {91C55091-A946-49B5-9517-8794EBCC5784}.Signed|x86.ActiveCfg = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|Any 
CPU.ActiveCfg = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|x64.ActiveCfg = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|x64.Build.0 = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|x86.ActiveCfg = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Debug|x86.Build.0 = Debug|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|Any CPU.Build.0 = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|x64.ActiveCfg = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|x64.Build.0 = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|x86.ActiveCfg = Release|Any CPU + {959CA5FE-6525-4EEF-86CA-F4978BEFF14F}.Release|x86.Build.0 = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|x64.ActiveCfg = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|x64.Build.0 = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|x86.ActiveCfg = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Debug|x86.Build.0 = Debug|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|Any CPU.Build.0 = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|x64.ActiveCfg = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|x64.Build.0 = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|x86.ActiveCfg = Release|Any CPU + {56DEDC64-B349-4150-BE9C-5805D831678D}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {EDB2082E-E2C1-4E9D-9A60-F937634254A6} + EndGlobalSection EndGlobal diff --git a/src/ServiceStack.Redis.sln.DotSettings b/src/ServiceStack.Redis.sln.DotSettings index 11f2c267..28747c5c 100644 --- a/src/ServiceStack.Redis.sln.DotSettings +++ b/src/ServiceStack.Redis.sln.DotSettings @@ -1,3 +1,4 @@  + True <data><IncludeFilters /><ExcludeFilters /></data> <data /> \ No newline at end of file diff --git a/src/ServiceStack.Redis/BasicRedisClientManager.Async.cs b/src/ServiceStack.Redis/BasicRedisClientManager.Async.cs new file mode 100644 index 00000000..1b2bd382 --- /dev/null +++ b/src/ServiceStack.Redis/BasicRedisClientManager.Async.cs @@ -0,0 +1,180 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Caching; +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + /// + /// Provides thread-safe retrieval of redis clients since each client is a new one. 
+ /// Allows the configuration of different ReadWrite and ReadOnly hosts + /// + public partial class BasicRedisClientManager + : IRedisClientsManagerAsync, ICacheClientAsync + { + private ValueTask GetCacheClientAsync(in CancellationToken _) + => new RedisClientManagerCacheClient(this).AsValueTaskResult(); + + private ValueTask GetReadOnlyCacheClientAsync(in CancellationToken _) + => ConfigureRedisClientAsync(this.GetReadOnlyClientImpl()).AsValueTaskResult(); + + private IRedisClientAsync ConfigureRedisClientAsync(IRedisClientAsync client) + => client; + + ValueTask IRedisClientsManagerAsync.GetCacheClientAsync(CancellationToken token) + => GetCacheClientAsync(token); + + ValueTask IRedisClientsManagerAsync.GetClientAsync(CancellationToken token) + => GetClientImpl().AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyCacheClientAsync(CancellationToken token) + => GetReadOnlyCacheClientAsync(token); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyClientAsync(CancellationToken token) + => GetReadOnlyClientImpl().AsValueTaskResult(); + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + async Task ICacheClientAsync.GetAsync(string key, CancellationToken token) + { + await using var client = await GetReadOnlyCacheClientAsync(token).ConfigureAwait(false); + return await client.GetAsync(key, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.FlushAllAsync(CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + await client.FlushAllAsync(token).ConfigureAwait(false); + } + + async Task> ICacheClientAsync.GetAllAsync(IEnumerable keys, CancellationToken token) + { + await using var client = await GetReadOnlyCacheClientAsync(token).ConfigureAwait(false); + return await client.GetAllAsync(keys, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAllAsync(IDictionary values, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + await client.SetAllAsync(values, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.RemoveAsync(string key, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.RemoveAsync(key, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.RemoveAllAsync(IEnumerable keys, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + await client.RemoveAllAsync(keys, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.IncrementAsync(string key, uint amount, CancellationToken token) + { + await using var 
client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.IncrementAsync(key, amount, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.DecrementAsync(string key, uint amount, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.DecrementAsync(key, amount, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.GetTimeToLiveAsync(string key, CancellationToken token) + { + await using var client = await GetReadOnlyCacheClientAsync(token).ConfigureAwait(false); + return await client.GetTimeToLiveAsync(key, token).ConfigureAwait(false); + } + + async IAsyncEnumerable ICacheClientAsync.GetKeysByPatternAsync(string pattern, [EnumeratorCancellation] CancellationToken token) + { + await using var client = await GetReadOnlyCacheClientAsync(token).ConfigureAwait(false); + await foreach (var key in client.GetKeysByPatternAsync(pattern, token).ConfigureAwait(false).WithCancellation(token)) + { + yield return key; + } + } + + async Task ICacheClientAsync.RemoveExpiredEntriesAsync(CancellationToken token) + { + await using var client = await GetCacheClientAsync(token).ConfigureAwait(false); + await client.RemoveExpiredEntriesAsync(token).ConfigureAwait(false); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs b/src/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs index 19a81a0a..aedf60eb 100644 --- a/src/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs +++ b/src/ServiceStack.Redis/BasicRedisClientManager.ICacheClient.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. 
// // Licensed under the same terms of ServiceStack. // @@ -19,163 +19,120 @@ namespace ServiceStack.Redis /// /// BasicRedisClientManager for ICacheClient /// - /// For more interoperabilty I'm also implementing the ICacheClient on + /// For more interoperability I'm also implementing the ICacheClient on /// this cache client manager which has the affect of calling /// GetCacheClient() for all write operations and GetReadOnlyCacheClient() /// for the read ones. /// - /// This works well for master-slave replication scenarios where you have - /// 1 master that replicates to multiple read slaves. + /// This works well for master-replica replication scenarios where you have + /// 1 master that replicates to multiple read replicas. /// public partial class BasicRedisClientManager : ICacheClient { - public ICacheClient GetCacheClient() - { - return new RedisClientManagerCacheClient(this); - } + public ICacheClient GetCacheClient() => + new RedisClientManagerCacheClient(this); - public ICacheClient GetReadOnlyCacheClient() - { - return ConfigureRedisClient(this.GetReadOnlyClient()); - } + public ICacheClient GetReadOnlyCacheClient() => + ConfigureRedisClient(this.GetReadOnlyClientImpl()); - private ICacheClient ConfigureRedisClient(IRedisClient client) - { - return client; - } - - #region Implementation of ICacheClient + private ICacheClient ConfigureRedisClient(IRedisClient client) => client; public bool Remove(string key) { - using (var client = GetReadOnlyCacheClient()) - { - return client.Remove(key); - } + using var client = GetReadOnlyCacheClient(); + return client.Remove(key); } public void RemoveAll(IEnumerable keys) { - using (var client = GetCacheClient()) - { - client.RemoveAll(keys); - } + using var client = GetCacheClient(); + client.RemoveAll(keys); } public T Get(string key) { - using (var client = GetReadOnlyCacheClient()) - { - return client.Get(key); - } + using var client = GetReadOnlyCacheClient(); + return client.Get(key); } public long Increment(string key, uint amount) { - using (var client = GetCacheClient()) - { - return client.Increment(key, amount); - } + using var client = GetCacheClient(); + return client.Increment(key, amount); } public long Decrement(string key, uint amount) { - using (var client = GetCacheClient()) - { - return client.Decrement(key, amount); - } + using var client = GetCacheClient(); + return client.Decrement(key, amount); } public bool Add(string key, T value) { - using (var client = GetCacheClient()) - { - return client.Add(key, value); - } + using var client = GetCacheClient(); + return client.Add(key, value); } public bool Set(string key, T value) { - using (var client = GetCacheClient()) - { - return client.Set(key, value); - } + using var client = GetCacheClient(); + return client.Set(key, value); } public bool Replace(string key, T value) { - using (var client = GetCacheClient()) - { - return client.Replace(key, value); - } + using var client = GetCacheClient(); + return client.Replace(key, value); } public bool Add(string key, T value, DateTime expiresAt) { - using (var client = GetCacheClient()) - { - return client.Add(key, value, expiresAt); - } + using var client = GetCacheClient(); + return client.Add(key, value, expiresAt); } public bool Set(string key, T value, DateTime expiresAt) { - using (var client = GetCacheClient()) - { - return client.Set(key, value, expiresAt); - } + using var client = GetCacheClient(); + return client.Set(key, value, expiresAt); } public bool Replace(string key, T value, DateTime expiresAt) { - using (var 
client = GetCacheClient()) - { - return client.Replace(key, value, expiresAt); - } + using var client = GetCacheClient(); + return client.Replace(key, value, expiresAt); } public bool Add(string key, T value, TimeSpan expiresIn) { - using (var client = GetCacheClient()) - { - return client.Add(key, value, expiresIn); - } + using var client = GetCacheClient(); + return client.Add(key, value, expiresIn); } public bool Set(string key, T value, TimeSpan expiresIn) { - using (var client = GetCacheClient()) - { - return client.Set(key, value, expiresIn); - } + using var client = GetCacheClient(); + return client.Set(key, value, expiresIn); } public bool Replace(string key, T value, TimeSpan expiresIn) { - using (var client = GetCacheClient()) - { - return client.Replace(key, value, expiresIn); - } + using var client = GetCacheClient(); + return client.Replace(key, value, expiresIn); } public void FlushAll() { - using (var client = GetCacheClient()) - { - client.FlushAll(); - } + using var client = GetCacheClient(); + client.FlushAll(); } public IDictionary GetAll(IEnumerable keys) { - using (var client = GetReadOnlyCacheClient()) - { - return client.GetAll(keys); - } + using var client = GetReadOnlyCacheClient(); + return client.GetAll(keys); } - - #endregion } diff --git a/src/ServiceStack.Redis/BasicRedisClientManager.cs b/src/ServiceStack.Redis/BasicRedisClientManager.cs index 7938ff23..d42c4f51 100644 --- a/src/ServiceStack.Redis/BasicRedisClientManager.cs +++ b/src/ServiceStack.Redis/BasicRedisClientManager.cs @@ -5,25 +5,29 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; +using System.Linq; using System.Threading; +using ServiceStack.Logging; using ServiceStack.Text; namespace ServiceStack.Redis { /// - /// Provides thread-safe retrievel of redis clients since each client is a new one. + /// Provides thread-safe retrieval of redis clients since each client is a new one. /// Allows the configuration of different ReadWrite and ReadOnly hosts /// public partial class BasicRedisClientManager : IRedisClientsManager, IRedisFailover, IHasRedisResolver { + public static ILog Log = LogManager.GetLogger(typeof(BasicRedisClientManager)); + public int? ConnectTimeout { get; set; } public int? SocketSendTimeout { get; set; } public int? SocketReceiveTimeout { get; set; } @@ -95,7 +99,8 @@ protected virtual void OnStart() /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts /// /// - public IRedisClient GetClient() + public IRedisClient GetClient() => GetClientImpl(); + private RedisClient GetClientImpl() { var client = InitNewClient(RedisResolver.CreateMasterClient(readWriteHostsIndex++)); return client; @@ -105,7 +110,8 @@ public IRedisClient GetClient() /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. 
/// /// - public virtual IRedisClient GetReadOnlyClient() + public virtual IRedisClient GetReadOnlyClient() => GetReadOnlyClientImpl(); + private RedisClient GetReadOnlyClientImpl() { var client = InitNewClient(RedisResolver.CreateSlaveClient(readOnlyHostsIndex++)); return client; @@ -154,10 +160,15 @@ public void FailoverTo(IEnumerable readWriteHosts, IEnumerable r { Interlocked.Increment(ref RedisState.TotalFailovers); + var masters = readWriteHosts.ToList(); + var replicas = readOnlyHosts.ToList(); + + Log.Info($"FailoverTo: {string.Join(",", masters)} : {string.Join(",", replicas)} Total: {RedisState.TotalFailovers}"); + lock (this) { - RedisResolver.ResetMasters(readWriteHosts); - RedisResolver.ResetSlaves(readOnlyHosts); + RedisResolver.ResetMasters(masters); + RedisResolver.ResetSlaves(replicas); } Start(); diff --git a/src/ServiceStack.Redis/BasicRedisResolver.cs b/src/ServiceStack.Redis/BasicRedisResolver.cs index f757820f..31986996 100644 --- a/src/ServiceStack.Redis/BasicRedisResolver.cs +++ b/src/ServiceStack.Redis/BasicRedisResolver.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; using System.Linq; using ServiceStack.Logging; -using ServiceStack.Text; namespace ServiceStack.Redis { @@ -16,21 +15,15 @@ public class BasicRedisResolver : IRedisResolver, IRedisResolverExtended public int ReadOnlyHostsCount { get; private set; } private RedisEndpoint[] masters; - private RedisEndpoint[] slaves; + private RedisEndpoint[] replicas; - public RedisEndpoint[] Masters - { - get { return masters; } - } - public RedisEndpoint[] Slaves - { - get { return slaves; } - } + public RedisEndpoint[] Masters => masters; + public RedisEndpoint[] Replicas => replicas; - public BasicRedisResolver(IEnumerable masters, IEnumerable slaves) + public BasicRedisResolver(IEnumerable masters, IEnumerable replicas) { ResetMasters(masters.ToList()); - ResetSlaves(slaves.ToList()); + ResetSlaves(replicas.ToList()); ClientFactory = RedisConfig.ClientFactory; } @@ -56,13 +49,13 @@ public virtual void ResetSlaves(IEnumerable hosts) ResetSlaves(hosts.ToRedisEndPoints()); } - public virtual void ResetSlaves(List newSlaves) + public virtual void ResetSlaves(List newReplicas) { - slaves = (newSlaves ?? TypeConstants.EmptyList).ToArray(); - ReadOnlyHostsCount = slaves.Length; + replicas = (newReplicas ?? TypeConstants.EmptyList).ToArray(); + ReadOnlyHostsCount = replicas.Length; if (log.IsDebugEnabled) - log.Debug("New Redis Slaves: " + string.Join(", ", slaves.Map(x => x.GetHostString()))); + log.Debug("New Redis Replicas: " + string.Join(", ", replicas.Map(x => x.GetHostString()))); } public RedisClient CreateRedisClient(RedisEndpoint config, bool master) @@ -78,7 +71,7 @@ public RedisEndpoint GetReadWriteHost(int desiredIndex) public RedisEndpoint GetReadOnlyHost(int desiredIndex) { return ReadOnlyHostsCount > 0 - ? slaves[desiredIndex % slaves.Length] + ? 
replicas[desiredIndex % replicas.Length] : GetReadWriteHost(desiredIndex); } diff --git a/src/ServiceStack.Redis/BufferPool.cs b/src/ServiceStack.Redis/BufferPool.cs deleted file mode 100644 index 901ed8bc..00000000 --- a/src/ServiceStack.Redis/BufferPool.cs +++ /dev/null @@ -1,85 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading; - -namespace ServiceStack.Redis -{ - /// - /// Courtesy of @marcgravell - /// http://code.google.com/p/protobuf-net/source/browse/trunk/protobuf-net/BufferPool.cs - /// - internal class BufferPool - { - internal static void Flush() - { - for (int i = 0; i < pool.Length; i++) - { - Interlocked.Exchange(ref pool[i], null); // and drop the old value on the floor - } - } - - private BufferPool() { } - const int PoolSize = 1000; //1.45MB - //internal const int BufferLength = 1450; //MTU size - some headers - private static readonly object[] pool = new object[PoolSize]; - - internal static byte[] GetBuffer(int bufferSize) - { - return bufferSize > RedisConfig.BufferPoolMaxSize - ? new byte[bufferSize] - : GetBuffer(); - } - - internal static byte[] GetBuffer() - { - object tmp; - for (int i = 0; i < pool.Length; i++) - { - if ((tmp = Interlocked.Exchange(ref pool[i], null)) != null) - return (byte[])tmp; - } - return new byte[RedisConfig.BufferLength]; - } - - internal static void ResizeAndFlushLeft(ref byte[] buffer, int toFitAtLeastBytes, int copyFromIndex, int copyBytes) - { - Debug.Assert(buffer != null); - Debug.Assert(toFitAtLeastBytes > buffer.Length); - Debug.Assert(copyFromIndex >= 0); - Debug.Assert(copyBytes >= 0); - - // try doubling, else match - int newLength = buffer.Length * 2; - if (newLength < toFitAtLeastBytes) newLength = toFitAtLeastBytes; - - var newBuffer = new byte[newLength]; - if (copyBytes > 0) - { - Buffer.BlockCopy(buffer, copyFromIndex, newBuffer, 0, copyBytes); - } - if (buffer.Length == RedisConfig.BufferLength) - { - ReleaseBufferToPool(ref buffer); - } - buffer = newBuffer; - } - - internal static void ReleaseBufferToPool(ref byte[] buffer) - { - if (buffer == null) return; - if (buffer.Length == RedisConfig.BufferLength) - { - for (int i = 0; i < pool.Length; i++) - { - if (Interlocked.CompareExchange(ref pool[i], buffer, null) == null) - { - break; // found a null; swapped it in - } - } - } - // if no space, just drop it on the floor - buffer = null; - } - - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/BufferedReader.Async.cs b/src/ServiceStack.Redis/BufferedReader.Async.cs new file mode 100644 index 00000000..39ad9d7e --- /dev/null +++ b/src/ServiceStack.Redis/BufferedReader.Async.cs @@ -0,0 +1,93 @@ +using ServiceStack.Redis.Internal; +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal sealed partial class BufferedReader + { + internal ValueTask ReadByteAsync(in CancellationToken token = default) + => _available > 0 ? ReadByteFromBuffer().AsValueTaskResult() : ReadByteSlowAsync(token); + + private ValueTask ReadByteSlowAsync(in CancellationToken token) + { + token.ThrowIfCancellationRequested(); + _offset = 0; +#if ASYNC_MEMORY + var pending = _source.ReadAsync(new Memory(_buffer), token); + if (!pending.IsCompletedSuccessfully) + return Awaited(this, pending); +#else + var pending = _source.ReadAsync(_buffer, 0, _buffer.Length, token); + if (pending.Status != TaskStatus.RanToCompletion) + return Awaited(this, pending); +#endif + + _available = pending.Result; + return (_available > 0 ? 
ReadByteFromBuffer() : -1).AsValueTaskResult(); + +#if ASYNC_MEMORY + static async ValueTask Awaited(BufferedReader @this, ValueTask pending) + { + @this._available = await pending.ConfigureAwait(false); + return @this._available > 0 ? @this.ReadByteFromBuffer() : -1; + } +#else + static async ValueTask Awaited(BufferedReader @this, Task pending) + { + @this._available = await pending.ConfigureAwait(false); + return @this._available > 0 ? @this.ReadByteFromBuffer() : -1; + } +#endif + } + + internal ValueTask ReadAsync(byte[] buffer, int offset, int count, in CancellationToken token = default) + => _available > 0 + ? ReadFromBuffer(buffer, offset, count).AsValueTaskResult() + : ReadSlowAsync(buffer, offset, count, token); + + private ValueTask ReadSlowAsync(byte[] buffer, int offset, int count, in CancellationToken token) + { + // if they're asking for more than we deal in, just step out of the way + if (count >= buffer.Length) + { +#if ASYNC_MEMORY + return _source.ReadAsync(new Memory(buffer, offset, count), token); +#else + return new ValueTask(_source.ReadAsync(buffer, offset, count, token)); +#endif + } + + // they're asking for less, so we could still have some left + _offset = 0; +#if ASYNC_MEMORY + var pending = _source.ReadAsync(new Memory(_buffer), token); + if (!pending.IsCompletedSuccessfully) + return Awaited(this, pending, buffer, offset, count); + + _available = pending.Result; // already checked status, this is fine + return (_available > 0 ? ReadFromBuffer(buffer, offset, count) : 0).AsValueTaskResult(); + + static async ValueTask Awaited(BufferedReader @this, ValueTask pending, byte[] buffer, int offset, int count) + { + @this._available = await pending.ConfigureAwait(false); + return @this._available > 0 ? @this.ReadFromBuffer(buffer, offset, count) : 0; + } +#else + var pending = _source.ReadAsync(_buffer, 0, _buffer.Length, token); + if (pending.Status != TaskStatus.RanToCompletion) + return Awaited(this, pending, buffer, offset, count); + + _available = pending.Result; // already checked status, this is fine + return (_available > 0 ? ReadFromBuffer(buffer, offset, count) : 0).AsValueTaskResult(); + + static async ValueTask Awaited(BufferedReader @this, Task pending, byte[] buffer, int offset, int count) + { + @this._available = await pending.ConfigureAwait(false); + return @this._available > 0 ? @this.ReadFromBuffer(buffer, offset, count) : 0; + } +#endif + } + } +} diff --git a/src/ServiceStack.Redis/BufferedReader.cs b/src/ServiceStack.Redis/BufferedReader.cs new file mode 100644 index 00000000..b5aa67df --- /dev/null +++ b/src/ServiceStack.Redis/BufferedReader.cs @@ -0,0 +1,81 @@ +using System; +using System.IO; + +namespace ServiceStack.Redis +{ + /// + /// BufferedReader is a minimal buffer implementation that provides + /// efficient sync and async access for byte-by-byte consumption; + /// like BufferedStream, but with the async part + /// + internal sealed partial class BufferedReader : IDisposable + { + private readonly Stream _source; + readonly byte[] _buffer; + private int _offset, _available; + public void Dispose() + { + _available = 0; + _source.Dispose(); + } + internal void Close() + { + _available = 0; + _source.Close(); + } + + internal BufferedReader(Stream source, int bufferSize) + { + _source = source; + _buffer = new byte[bufferSize]; + Reset(); + } + + internal void Reset() + { + _offset = _available = 0; + } + + internal int ReadByte() + => _available > 0 ? 
ReadByteFromBuffer() : ReadByteSlow(); + + private int ReadByteFromBuffer() + { + --_available; + return _buffer[_offset++]; + } + + private int ReadByteSlow() + { + _available = _source.Read(_buffer, _offset = 0, _buffer.Length); + return _available > 0 ? ReadByteFromBuffer() : -1; + } + + + private int ReadFromBuffer(byte[] buffer, int offset, int count) + { + // we have data in the buffer; hand it back + if (_available < count) count = _available; + Buffer.BlockCopy(_buffer, _offset, buffer, offset, count); + _available -= count; + _offset += count; + return count; + } + + internal int Read(byte[] buffer, int offset, int count) + => _available > 0 + ? ReadFromBuffer(buffer, offset, count) + : ReadSlow(buffer, offset, count); + + private int ReadSlow(byte[] buffer, int offset, int count) + { + // if they're asking for more than we deal in, just step out of the way + if (count >= buffer.Length) + return _source.Read(buffer, offset, count); + + // they're asking for less, so we could still have some left + _available = _source.Read(_buffer, _offset = 0, _buffer.Length); + return _available > 0 ? ReadFromBuffer(buffer, offset, count) : 0; + } + } +} diff --git a/src/ServiceStack.Redis/BufferedStream.cs b/src/ServiceStack.Redis/BufferedStream.cs index 2fbd73ae..2d745c5f 100644 --- a/src/ServiceStack.Redis/BufferedStream.cs +++ b/src/ServiceStack.Redis/BufferedStream.cs @@ -1,20 +1,25 @@ -#if NETSTANDARD1_3 +#if NETCORE using System; using System.IO; using System.Net.Sockets; namespace ServiceStack.Redis { + // recommendation: mark this obsolete as it is incomplete, and no longer used; + // I've marked it obsolete in DEBUG to be sure +#if DEBUG + [Obsolete("Prefer System.IO.BufferedStream")] +#endif public sealed class BufferedStream : Stream { - NetworkStream networkStream; + Stream networkStream; public BufferedStream(Stream stream) : this(stream, 0) {} public BufferedStream(Stream stream, int bufferSize) { - networkStream = (NetworkStream)stream; + networkStream = stream; } public override bool CanRead => networkStream.CanRead; diff --git a/src/ServiceStack.Redis/ConnectionUtils.cs b/src/ServiceStack.Redis/ConnectionUtils.cs deleted file mode 100644 index f1731b3e..00000000 --- a/src/ServiceStack.Redis/ConnectionUtils.cs +++ /dev/null @@ -1,449 +0,0 @@ -//using System; -//using System.Collections.Generic; -//using System.Collections.Specialized; -//using System.IO; -//using System.Linq; -//using System.Text; - -//namespace ServiceStack.Redis -//{ -// /// -// /// Provides utility methods for managing connections to multiple (master/slave) redis servers (with the same -// /// information - not sharding). -// /// -// public static class ConnectionUtils -// { -// /// -// /// Inspect the provided configration, and connect to the available servers to report which server is the preferred/active node. -// /// -// public static string SelectConfiguration(string configuration, out string[] availableEndpoints, TextWriter log = null) -// { -// string selected; -// using (SelectAndCreateConnection(configuration, log, out selected, out availableEndpoints, false)) { } -// return selected; -// } -// /// -// /// Inspect the provided configration, and connect to the preferred/active node after checking what nodes are available. 
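The new `BufferedReader` above favours a synchronous fast path, serving bytes from its in-memory buffer and only touching the underlying `Stream` asynchronously when the buffer is empty. The type is `internal`, so the sketch below is a simplified stand-alone illustration of the same pattern rather than the library class; it omits the extra `IsCompletedSuccessfully` check the real code uses to avoid an async state machine when the underlying read completes synchronously.

```csharp
using System.IO;
using System.Threading;
using System.Threading.Tasks;

// Simplified stand-in for the pattern used by BufferedReader (not the library type).
sealed class MiniBufferedReader
{
    private readonly Stream _source;
    private readonly byte[] _buffer;
    private int _offset, _available;

    public MiniBufferedReader(Stream source, int bufferSize = 4096)
    {
        _source = source;
        _buffer = new byte[bufferSize];
    }

    public ValueTask<int> ReadByteAsync(CancellationToken token = default)
        => _available > 0
            ? new ValueTask<int>(ReadByteFromBuffer()) // fast path: completes synchronously
            : ReadByteSlowAsync(token);                // slow path: refill the buffer first

    private int ReadByteFromBuffer()
    {
        --_available;
        return _buffer[_offset++];
    }

    private async ValueTask<int> ReadByteSlowAsync(CancellationToken token)
    {
        _offset = 0;
        _available = await _source.ReadAsync(_buffer, 0, _buffer.Length, token).ConfigureAwait(false);
        return _available > 0 ? ReadByteFromBuffer() : -1; // -1 = end of stream, like Stream.ReadByte()
    }
}
```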
-// /// -// public static RedisConnection Connect(string configuration, TextWriter log = null) -// { -// string selectedConfiguration; -// string[] availableEndpoints; -// return SelectAndCreateConnection(configuration, log, out selectedConfiguration, out availableEndpoints, true); -// } - -// /// -// /// Subscribe to perform some operation when a change to the preferred/active node is broadcast. -// /// -// public static void SubscribeToMasterSwitch(RedisSubscriberConnection connection, Action handler) -// { -// if (connection == null) throw new ArgumentNullException("connection"); -// if (handler == null) throw new ArgumentNullException("handler"); - -// connection.Subscribe(RedisMasterChangedChannel, (channel, message) => handler(Encoding.UTF8.GetString(message))); -// } -// /// -// /// Using the configuration available, and after checking which nodes are available, switch the master node and broadcast this change. -// /// -// public static void SwitchMaster(string configuration, string newMaster, TextWriter log = null) -// { -// string newConfig; -// string[] availableEndpoints; - -// SelectAndCreateConnection(configuration, log, out newConfig, out availableEndpoints, false, newMaster); -// } - -// const string RedisMasterChangedChannel = "__Booksleeve_MasterChanged", TieBreakerKey = "__Booksleeve_TieBreak"; - -// /// -// /// Prompt all clients to reconnect. -// /// -// public static void BroadcastReconnectMessage(RedisConnection connection) -// { -// if (connection == null) throw new ArgumentNullException("connection"); - -// connection.Wait(connection.Publish(RedisMasterChangedChannel, "*")); -// } -// private static RedisConnection SelectWithTieBreak(TextWriter log, List nodes, Dictionary tiebreakers) -// { -// if (nodes.Count == 0) return null; -// if (nodes.Count == 1) return nodes[0]; -// Func valueOrDefault = key => -// { -// int tmp; -// if (!tiebreakers.TryGetValue(key, out tmp)) tmp = 0; -// return tmp; -// }; -// var tuples = (from node in nodes -// let key = node.Host + ":" + node.Port -// let count = valueOrDefault(key) -// select new { Node = node, Key = key, Count = count }).ToList(); - -// // check for uncontested scenario -// int contenderCount = tuples.Count(x => x.Count > 0); -// switch (contenderCount) -// { -// case 0: -// log.WriteLine("No tie-break contenders; selecting arbitrary node"); -// return tuples[0].Node; -// case 1: -// log.WriteLine("Unaminous tie-break winner"); -// return tuples.Single(x => x.Count > 0).Node; -// } - -// // contested -// int maxCount = tuples.Max(x => x.Count); -// var competing = tuples.Where(x => x.Count == maxCount).ToList(); - -// switch (competing.Count) -// { -// case 0: -// return null; // impossible, but never rely on the impossible not happening ;p -// case 1: -// log.WriteLine("Contested, but clear, tie-break winner"); -// break; -// default: -// log.WriteLine("Contested and ambiguous tie-break; selecting arbitrary node"); -// break; -// } -// return competing[0].Node; -// } - -// private static string[] GetConfigurationOptions(string configuration, out int syncTimeout, out bool allowAdmin) -// { -// syncTimeout = 1000; -// allowAdmin = false; - -// // break it down by commas -// var arr = configuration.Split(','); -// var options = new List(); -// foreach (var option in arr) -// { -// var trimmed = option.Trim(); - -// if (trimmed.IsNullOrWhiteSpace() || options.Contains(trimmed)) continue; - -// // check for special tokens -// int idx = trimmed.IndexOf('='); -// if (idx > 0) -// { -// if 
(option.StartsWith(SyncTimeoutPrefix)) -// { -// int tmp; -// if (int.TryParse(option.Substring(idx + 1), out tmp)) syncTimeout = tmp; -// continue; -// } -// if (option.StartsWith(AllowAdminPrefix)) -// { -// bool tmp; -// if (bool.TryParse(option.Substring(idx + 1), out tmp)) allowAdmin = tmp; -// continue; -// } -// } - -// options.Add(trimmed); -// } -// return options.ToArray(); -// } - -// internal const string AllowAdminPrefix = "allowAdmin=", SyncTimeoutPrefix = "syncTimeout="; -// private static RedisConnection SelectAndCreateConnection(string configuration, TextWriter log, out string selectedConfiguration, out string[] availableEndpoints, bool autoMaster, string newMaster = null) -// { -// int syncTimeout; -// bool allowAdmin; -// if (log == null) log = new StringWriter(); -// var arr = GetConfigurationOptions(configuration, out syncTimeout, out allowAdmin); -// if (!newMaster.IsNullOrWhiteSpace()) allowAdmin = true; // need this to diddle the slave/master config - -// log.WriteLine("{0} unique nodes specified", arr.Length); -// log.WriteLine("sync timeout: {0}ms, admin commands: {1}", syncTimeout, -// allowAdmin ? "enabled" : "disabled"); -// if (arr.Length == 0) -// { -// log.WriteLine("No nodes to consider"); -// selectedConfiguration = null; -// availableEndpoints = new string[0]; -// return null; -// } -// var connections = new List(arr.Length); -// RedisConnection preferred = null; - -// try -// { -// var infos = new List>(arr.Length); -// var tiebreakers = new List>(arr.Length); -// foreach (var option in arr) -// { -// if (option.IsNullOrWhiteSpace()) continue; - -// RedisConnection conn = null; -// try -// { - -// var parts = option.Split(':'); -// if (parts.Length == 0) continue; - -// string host = parts[0].Trim(); -// int port = 6379, tmp; -// if (parts.Length > 1 && int.TryParse(parts[1].Trim(), out tmp)) port = tmp; -// conn = new RedisConnection(host, port, syncTimeout: syncTimeout, allowAdmin: allowAdmin); - -// log.WriteLine("Opening connection to {0}:{1}...", host, port); -// conn.Open(); -// var info = conn.GetInfo(); -// var tiebreak = conn.Strings.GetString(0, TieBreakerKey); -// connections.Add(conn); -// infos.Add(info); -// tiebreakers.Add(tiebreak); -// } -// catch (Exception ex) -// { -// if (conn == null) -// { -// log.WriteLine("Error parsing option \"{0}\": {1}", option, ex.Message); -// } -// else -// { -// log.WriteLine("Error connecting: {0}", ex.Message); -// } -// } -// } -// List masters = new List(), slaves = new List(); -// var breakerScores = new Dictionary(); -// foreach (var tiebreak in tiebreakers) -// { -// try -// { -// if (tiebreak.Wait(syncTimeout)) -// { -// string key = tiebreak.Result; -// if (key.IsNullOrWhiteSpace()) continue; -// int score; -// if (breakerScores.TryGetValue(key, out score)) breakerScores[key] = score + 1; -// else breakerScores.Add(key, 1); -// } -// } -// catch { /* if a node is down, that's fine too */ } -// } -// // check for tie-breakers (i.e. 
when we store which is the master) -// switch (breakerScores.Count) -// { -// case 0: -// log.WriteLine("No tie-breakers found"); -// break; -// case 1: -// log.WriteLine("Tie-breaker is unanimous: {0}", breakerScores.Keys.Single()); -// break; -// default: -// log.WriteLine("Ambiguous tie-breakers:"); -// foreach (var kvp in breakerScores.OrderByDescending(x => x.Value)) -// { -// log.WriteLine("\t{0}: {1}", kvp.Key, kvp.Value); -// } -// break; -// } - -// for (int i = 0; i < connections.Count; i++) -// { -// log.WriteLine("Reading configuration from {0}:{1}...", connections[i].Host, connections[i].Port); -// try -// { -// if (!infos[i].Wait(syncTimeout)) -// { -// log.WriteLine("\tTimeout fetching INFO"); -// continue; -// } -// var infoPairs = new StringDictionary(); -// using (var sr = new StringReader(infos[i].Result)) -// { -// string line; -// while ((line = sr.ReadLine()) != null) -// { -// int idx = line.IndexOf(':'); -// if (idx < 0) continue; -// string key = line.Substring(0, idx).Trim(), -// value = line.Substring(idx + 1, line.Length - (idx + 1)).Trim(); -// infoPairs[key] = value; -// } -// } -// string role = infoPairs["role"]; -// switch (role) -// { -// case "slave": -// log.WriteLine("\tServer is SLAVE of {0}:{1}", -// infoPairs["master_host"], infoPairs["master_port"]); -// log.Write("\tLink is {0}, seen {1} seconds ago", -// infoPairs["master_link_status"], infoPairs["master_last_io_seconds_ago"]); -// if (infoPairs["master_sync_in_progress"] == "1") log.Write(" (sync is in progress)"); -// log.WriteLine(); -// slaves.Add(connections[i]); -// break; -// case "master": -// log.WriteLine("\tServer is MASTER, with {0} slaves", infoPairs["connected_slaves"]); -// masters.Add(connections[i]); -// break; -// default: -// log.WriteLine("\tUnknown role: {0}", role); -// break; -// } -// string tmp = infoPairs["connected_clients"]; -// int clientCount, channelCount, patternCount; -// if (tmp.IsNullOrWhiteSpace() || !int.TryParse(tmp, out clientCount)) clientCount = -1; -// tmp = infoPairs["pubsub_channels"]; -// if (tmp.IsNullOrWhiteSpace(tmp) || !int.TryParse(tmp, out channelCount)) channelCount = -1; -// tmp = infoPairs["pubsub_patterns"]; -// if (tmp.IsNullOrWhiteSpace(tmp) || !int.TryParse(tmp, out patternCount)) patternCount = -1; -// log.WriteLine("\tClients: {0}; channels: {1}; patterns: {2}", clientCount, channelCount, patternCount); -// } -// catch (Exception ex) -// { -// log.WriteLine("\tError reading INFO results: {0}", ex.Message); -// } -// } - -// if (newMaster == null) -// { -// switch (masters.Count) -// { -// case 0: -// switch (slaves.Count) -// { -// case 0: -// log.WriteLine("No masters or slaves found"); -// break; -// case 1: -// log.WriteLine("No masters found; selecting single slave"); -// preferred = slaves[0]; -// break; -// default: -// log.WriteLine("No masters found; considering {0} slaves...", slaves.Count); -// preferred = SelectWithTieBreak(log, slaves, breakerScores); -// break; -// } -// if (preferred != null) -// { -// if (autoMaster) -// { -// //LogException("Promoting redis SLAVE to MASTER"); -// log.WriteLine("Promoting slave to master..."); -// if (allowAdmin) -// { // can do on this connection -// preferred.Wait(preferred.Server.MakeMaster()); -// } -// else -// { // need an admin connection for this -// using (var adminPreferred = new RedisConnection(preferred.Host, preferred.Port, allowAdmin: true, syncTimeout: syncTimeout)) -// { -// adminPreferred.Open(); -// adminPreferred.Wait(adminPreferred.Server.MakeMaster()); -// } -// } -// } 
-// else -// { -// log.WriteLine("Slave should be promoted to master (but not done yet)..."); -// } -// } -// break; -// case 1: -// log.WriteLine("One master found; selecting"); -// preferred = masters[0]; -// break; -// default: -// log.WriteLine("Considering {0} masters...", masters.Count); -// preferred = SelectWithTieBreak(log, masters, breakerScores); -// break; -// } - - -// } -// else -// { // we have been instructed to change master server -// preferred = masters.Concat(slaves).FirstOrDefault(conn => (conn.Host + ":" + conn.Port) == newMaster); -// if (preferred == null) -// { -// log.WriteLine("Selected new master not available: {0}", newMaster); -// } -// else -// { -// int errorCount = 0; -// try -// { -// log.WriteLine("Promoting to master: {0}:{1}...", preferred.Host, preferred.Port); -// preferred.Wait(preferred.Server.MakeMaster()); -// preferred.Strings.Set(0, TieBreakerKey, newMaster); -// preferred.Wait(preferred.Publish(RedisMasterChangedChannel, newMaster)); -// } -// catch (Exception ex) -// { -// log.WriteLine("\t{0}", ex.Message); -// errorCount++; -// } - -// if (errorCount == 0) // only make slaves if the master was happy -// { -// foreach (var conn in masters.Concat(slaves)) -// { -// if (conn == preferred) continue; // can't make self a slave! - -// try -// { -// log.WriteLine("Enslaving: {0}:{1}...", conn.Host, conn.Port); -// // set the tie-breaker **first** in case of problems -// conn.Strings.Set(0, TieBreakerKey, newMaster); -// // and broadcast to anyone who thinks this is the master -// conn.Publish(RedisMasterChangedChannel, newMaster); -// // now make it a slave -// conn.Wait(conn.Server.MakeSlave(preferred.Host, preferred.Port)); -// } -// catch (Exception ex) -// { -// log.WriteLine("\t{0}", ex.Message); -// errorCount++; -// } -// } -// } -// if (errorCount != 0) -// { -// log.WriteLine("Things didn't go smoothly; CHECK WHAT HAPPENED!"); -// } - -// // want the connection disposed etc -// preferred = null; -// } -// } - -// if (preferred == null) -// { -// selectedConfiguration = null; -// } -// else -// { -// selectedConfiguration = preferred.Host + ":" + preferred.Port; -// log.WriteLine("Selected server {0}", selectedConfiguration); -// } - -// availableEndpoints = (from conn in masters.Concat(slaves) -// select conn.Host + ":" + conn.Port).ToArray(); -// return preferred; -// } -// finally -// { -// foreach (var conn in connections) -// { -// if (conn != null && conn != preferred) try { conn.Dispose(); } -// catch { } -// } -// } -// } - -// } - -// public static class ConnectionUtilsExtensions -// { -// public static bool IsNullOrWhiteSpace(this string str) -// { -// return str == null || str.Trim().Length == 0; -// } -// } -//} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.Async.cs b/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.Async.cs new file mode 100644 index 00000000..0c75d760 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.Async.cs @@ -0,0 +1,110 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Internal; +using ServiceStack.Redis.Pipeline; + +namespace ServiceStack.Redis.Generic +{ + /// + /// A complete redis command, with method to send command, receive response, and run callback on success or failure + /// + internal partial class QueuedRedisTypedCommand : QueuedRedisOperation + { + private Delegate _asyncReturnCommand; + partial void 
OnExecuteThrowIfAsync() + { + if (_asyncReturnCommand is object) + { + throw new InvalidOperationException("An async return command was present, but the queued operation is being processed synchronously"); + } + } + private QueuedRedisTypedCommand SetAsyncReturnCommand(Delegate value) + { + if (_asyncReturnCommand is object && _asyncReturnCommand != value) + throw new InvalidOperationException("Only a single async return command can be assigned"); + _asyncReturnCommand = value; + return this; + } + + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> VoidReturnCommandAsync) + => SetAsyncReturnCommand(VoidReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> IntReturnCommandAsync) + => SetAsyncReturnCommand(IntReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> LongReturnCommandAsync) + => SetAsyncReturnCommand(LongReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> BoolReturnCommandAsync) + => SetAsyncReturnCommand(BoolReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> BytesReturnCommandAsync) + => SetAsyncReturnCommand(BytesReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> MultiBytesReturnCommandAsync) + => SetAsyncReturnCommand(MultiBytesReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> StringReturnCommandAsync) + => SetAsyncReturnCommand(StringReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask>> MultiStringReturnCommandAsync) + => SetAsyncReturnCommand(MultiStringReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> DoubleReturnCommandAsync) + => SetAsyncReturnCommand(DoubleReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask>> MultiObjectReturnCommandAsync) + => SetAsyncReturnCommand(MultiObjectReturnCommandAsync); + internal QueuedRedisTypedCommand WithAsyncReturnCommand(Func, ValueTask> ObjectReturnCommandAsync) + => SetAsyncReturnCommand(ObjectReturnCommandAsync); + + public ValueTask ExecuteAsync(IRedisTypedClientAsync client) + { + try + { + switch (_asyncReturnCommand) + { + case null: + ExecuteThrowIfSync(); + return default; + case Func, ValueTask> VoidReturnCommandAsync: + return VoidReturnCommandAsync(client); + case Func, ValueTask> IntReturnCommandAsync: + return IntReturnCommandAsync(client).Await(); + case Func, ValueTask> LongReturnCommandAsync: + return LongReturnCommandAsync(client).Await(); + case Func, ValueTask> DoubleReturnCommandAsync: + return DoubleReturnCommandAsync(client).Await(); + case Func, ValueTask> BytesReturnCommandAsync: + return BytesReturnCommandAsync(client).Await(); + case Func, ValueTask> StringReturnCommandAsync: + return StringReturnCommandAsync(client).Await(); + case Func, ValueTask> MultiBytesReturnCommandAsync: + return MultiBytesReturnCommandAsync(client).Await(); + case Func, ValueTask>> MultiStringReturnCommandAsync: + return MultiStringReturnCommandAsync(client).Await(); + case object obj: + ExecuteThrowIfSync(); + return default; + } + } + catch (Exception ex) + { + Log.Error(ex); + return default; // non-async version swallows + } + } + + protected void ExecuteThrowIfSync() + { + if (VoidReturnCommand is object + || IntReturnCommand is object + || LongReturnCommand is object + || BoolReturnCommand is object + || 
BytesReturnCommand is object + || MultiBytesReturnCommand is object + || StringReturnCommand is object + || MultiStringReturnCommand is object + || DoubleReturnCommand is object + || MultiObjectReturnCommand is object + || ObjectReturnCommand is object) + { + throw new InvalidOperationException("A sync return command was present, but the queued operation is being processed asynchronously"); + } + } + + } +} diff --git a/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.cs b/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.cs index ca8b2870..fe9c8b0b 100644 --- a/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.cs +++ b/src/ServiceStack.Redis/Generic/QueuedRedisTypedCommand.cs @@ -1,14 +1,14 @@ -using System; +using ServiceStack.Redis.Pipeline; +using System; using System.Collections.Generic; -using ServiceStack.Redis.Generic; -using ServiceStack.Redis.Pipeline; +using System.Threading.Tasks; namespace ServiceStack.Redis.Generic { /// /// A complete redis command, with method to send command, receive response, and run callback on success or failure /// - internal class QueuedRedisTypedCommand : QueuedRedisOperation + internal partial class QueuedRedisTypedCommand : QueuedRedisOperation { public Action> VoidReturnCommand { get; set; } @@ -74,5 +74,7 @@ public void Execute(IRedisTypedClient client) } } + private void ExecuteThrowIfAsync() => OnExecuteThrowIfAsync(); + partial void OnExecuteThrowIfAsync(); } } diff --git a/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.Async.cs b/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.Async.cs new file mode 100644 index 00000000..86de9a27 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.Async.cs @@ -0,0 +1,55 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
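The `QueuedRedisTypedCommand.Async.cs` partial above stores whichever async callback was registered in a single `Delegate` field, refuses a second assignment, and dispatches on the delegate's concrete type when the pipeline or transaction executes asynchronously (the sync/async guards throw if the two worlds are mixed). A condensed stand-alone sketch of that shape, with hypothetical names and only two of the delegate shapes:

```csharp
using System;
using System.Threading.Tasks;

// Illustrative only: mirrors the single-delegate + type-switch dispatch used above.
class MiniQueuedCommand<TClient>
{
    private Delegate _asyncReturnCommand;

    public MiniQueuedCommand<TClient> With(Func<TClient, ValueTask> voidCommand) => Set(voidCommand);
    public MiniQueuedCommand<TClient> With(Func<TClient, ValueTask<long>> longCommand) => Set(longCommand);

    private MiniQueuedCommand<TClient> Set(Delegate value)
    {
        // only a single async return command may ever be assigned
        if (_asyncReturnCommand is object && _asyncReturnCommand != value)
            throw new InvalidOperationException("Only a single async return command can be assigned");
        _asyncReturnCommand = value;
        return this;
    }

    public async ValueTask ExecuteAsync(TClient client)
    {
        switch (_asyncReturnCommand)
        {
            case null:
                return; // nothing was queued
            case Func<TClient, ValueTask> voidCommand:
                await voidCommand(client).ConfigureAwait(false);
                break;
            case Func<TClient, ValueTask<long>> longCommand:
                await longCommand(client).ConfigureAwait(false); // the real type routes results to callbacks
                break;
        }
    }
}
```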
+// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + internal partial class RedisClientHash + : IRedisHashAsync + { + IRedisTypedClientAsync AsyncClient => client; + + ValueTask IRedisHashAsync.AddAsync(KeyValuePair item, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(this, item.Key, item.Value, token).Await(); + + ValueTask IRedisHashAsync.AddAsync(TKey key, T value, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(this, key, value, token).Await(); + + ValueTask IRedisHashAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveEntryAsync(new[] { this }, token).Await(); + + ValueTask IRedisHashAsync.ContainsKeyAsync(TKey key, CancellationToken token) + => AsyncClient.HashContainsEntryAsync(this, key, token); + + ValueTask IRedisHashAsync.CountAsync(CancellationToken token) + => AsyncClient.GetHashCountAsync(this, token).AsInt32(); + + ValueTask> IRedisHashAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllEntriesFromHashAsync(this, token); + + async IAsyncEnumerator> IAsyncEnumerable>.GetAsyncEnumerator(CancellationToken token) + { + var all = await AsyncClient.GetAllEntriesFromHashAsync(this, token).ConfigureAwait(false); + foreach (var pair in all) + { + yield return pair; + } + } + + ValueTask IRedisHashAsync.RemoveAsync(TKey key, CancellationToken token) + => AsyncClient.RemoveEntryFromHashAsync(this, key, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs b/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs index f128c895..20fd91bc 100644 --- a/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs +++ b/src/ServiceStack.Redis/Generic/RedisClientHash.Generic.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -19,7 +19,7 @@ namespace ServiceStack.Redis.Generic /// /// Wrap the common redis set operations under a ICollection[string] interface. /// - internal class RedisClientHash + internal partial class RedisClientHash : IRedisHash { private readonly RedisTypedClient client; diff --git a/src/ServiceStack.Redis/Generic/RedisClientList.Generic.Async.cs b/src/ServiceStack.Redis/Generic/RedisClientList.Generic.Async.cs new file mode 100644 index 00000000..66b3e596 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisClientList.Generic.Async.cs @@ -0,0 +1,184 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
+// + +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + internal partial class RedisClientList + : IRedisListAsync + { + IRedisTypedClientAsync AsyncClient => client; + IRedisListAsync AsAsync() => this; + + async ValueTask IRedisListAsync.AddRangeAsync(IEnumerable values, CancellationToken token) + { + //TODO: replace it with a pipeline implementation ala AddRangeToSet + foreach (var value in values) + { + await AsyncClient.AddItemToListAsync(this, value, token).ConfigureAwait(false); + } + } + + ValueTask IRedisListAsync.AppendAsync(T value, CancellationToken token) + => AsyncClient.AddItemToListAsync(this, value, token); + + ValueTask IRedisListAsync.BlockingDequeueAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingDequeueItemFromListAsync(this, timeOut, token); + + ValueTask IRedisListAsync.BlockingPopAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingPopItemFromListAsync(this, timeOut, token); + + ValueTask IRedisListAsync.BlockingRemoveStartAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingRemoveStartFromListAsync(this, timeOut, token); + + ValueTask IRedisListAsync.CountAsync(CancellationToken token) + => AsyncClient.GetListCountAsync(this, token).AsInt32(); + + ValueTask IRedisListAsync.DequeueAsync(CancellationToken token) + => AsyncClient.DequeueItemFromListAsync(this, token); + + ValueTask IRedisListAsync.EnqueueAsync(T value, CancellationToken token) + => AsyncClient.EnqueueItemOnListAsync(this, value, token); + + ValueTask> IRedisListAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromListAsync(this, token); + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + var count = await AsAsync().CountAsync(token).ConfigureAwait(false); + if (count <= PageLimit) + { + var all = await AsyncClient.GetAllItemsFromListAsync(this, token).ConfigureAwait(false); + foreach (var item in all) + { + yield return item; + } + } + else + { + // from GetPagingEnumerator() + var skip = 0; + List pageResults; + do + { + pageResults = await AsyncClient.GetRangeFromListAsync(this, skip, PageLimit, token).ConfigureAwait(false); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + } + + ValueTask> IRedisListAsync.GetRangeAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetRangeFromListAsync(this, startingFrom, endingAt, token); + + ValueTask> IRedisListAsync.GetRangeFromSortedListAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.SortListAsync(this, startingFrom, endingAt, token); + + ValueTask IRedisListAsync.PopAndPushAsync(IRedisListAsync toList, CancellationToken token) + => AsyncClient.PopAndPushItemBetweenListsAsync(this, toList, token); + + ValueTask IRedisListAsync.PopAsync(CancellationToken token) + => AsyncClient.PopItemFromListAsync(this, token); + + ValueTask IRedisListAsync.PrependAsync(T value, CancellationToken token) + => AsyncClient.PrependItemToListAsync(this, value, token); + + ValueTask IRedisListAsync.PushAsync(T value, CancellationToken token) + => AsyncClient.PushItemToListAsync(this, value, token); + + ValueTask IRedisListAsync.RemoveAllAsync(CancellationToken token) + => AsyncClient.RemoveAllFromListAsync(this, token); + + ValueTask 
IRedisListAsync.RemoveEndAsync(CancellationToken token) + => AsyncClient.RemoveEndFromListAsync(this, token); + + ValueTask IRedisListAsync.RemoveStartAsync(CancellationToken token) + => AsyncClient.RemoveStartFromListAsync(this, token); + + ValueTask IRedisListAsync.RemoveValueAsync(T value, CancellationToken token) + => AsyncClient.RemoveItemFromListAsync(this, value, token); + + ValueTask IRedisListAsync.RemoveValueAsync(T value, int noOfMatches, CancellationToken token) + => AsyncClient.RemoveItemFromListAsync(this, value, noOfMatches, token); + + ValueTask IRedisListAsync.TrimAsync(int keepStartingFrom, int keepEndingAt, CancellationToken token) + => AsyncClient.TrimListAsync(this, keepStartingFrom, keepEndingAt, token); + + async ValueTask IRedisListAsync.RemoveAsync(T value, CancellationToken token) + { + var index = await AsAsync().IndexOfAsync(value, token).ConfigureAwait(false); + if (index != -1) + { + await AsAsync().RemoveAtAsync(index, token).ConfigureAwait(false); + return true; + } + return false; + } + + ValueTask IRedisListAsync.AddAsync(T value, CancellationToken token) + => AsyncClient.AddItemToListAsync(this, value, token); + + async ValueTask IRedisListAsync.RemoveAtAsync(int index, CancellationToken token) + { + //TODO: replace with native implementation when one exists + + var nativeClient = client.NativeClient as IRedisNativeClientAsync ?? throw new NotSupportedException( + $"The native client ('{client.NativeClient.GetType().Name}') does not implement {nameof(IRedisNativeClientAsync)}"); + + var markForDelete = Guid.NewGuid().ToString(); + await nativeClient.LSetAsync(listId, index, Encoding.UTF8.GetBytes(markForDelete), token).ConfigureAwait(false); + + const int removeAll = 0; + await nativeClient.LRemAsync(listId, removeAll, Encoding.UTF8.GetBytes(markForDelete), token).ConfigureAwait(false); + } + + async ValueTask IRedisListAsync.ContainsAsync(T value, CancellationToken token) + { + //TODO: replace with native implementation when exists + await foreach (var existingItem in this.ConfigureAwait(false).WithCancellation(token)) + { + if (Equals(existingItem, value)) return true; + } + return false; + } + + ValueTask IRedisListAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveAllFromListAsync(this, token); + + async ValueTask IRedisListAsync.IndexOfAsync(T value, CancellationToken token) + { + //TODO: replace with native implementation when exists + var i = 0; + await foreach (var existingItem in this.ConfigureAwait(false).WithCancellation(token)) + { + if (Equals(existingItem, value)) return i; + i++; + } + return -1; + } + + ValueTask IRedisListAsync.ElementAtAsync(int index, CancellationToken token) + => AsyncClient.GetItemFromListAsync(this, index, token); + + ValueTask IRedisListAsync.SetValueAsync(int index, T value, CancellationToken token) + => AsyncClient.SetItemInListAsync(this, index, value, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientList.Generic.cs b/src/ServiceStack.Redis/Generic/RedisClientList.Generic.cs index 41eb1ae9..61cd0049 100644 --- a/src/ServiceStack.Redis/Generic/RedisClientList.Generic.cs +++ b/src/ServiceStack.Redis/Generic/RedisClientList.Generic.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
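The list wrapper is consumed through `IRedisListAsync<T>` in the same way. Two details worth noting from the implementation above: async enumeration pages through large lists `PageLimit` items at a time instead of loading everything, and `RemoveAtAsync` emulates a missing native command by `LSET`-ing a sentinel GUID at the index and then `LREM`-ing it. A minimal sketch; imports and the source of the list instance are assumptions:

```csharp
using ServiceStack.Redis.Generic;
using System.Threading;
using System.Threading.Tasks;

class ListExample
{
    public static async Task UseAsync(IRedisListAsync<string> recent, CancellationToken token)
    {
        // currently queued one item at a time (see the TODO above)
        await recent.AddRangeAsync(new[] { "a", "b", "c" }, token);

        var count = await recent.CountAsync(token);

        // pages through the list in chunks when it is larger than PageLimit
        await foreach (var item in recent)
        {
            // ...
        }

        // no native "remove at index" in Redis: LSET sentinel + LREM under the covers
        await recent.RemoveAtAsync(0, token);
    }
}
```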
// @@ -17,7 +17,7 @@ namespace ServiceStack.Redis.Generic { - internal class RedisClientList + internal partial class RedisClientList : IRedisList { private readonly RedisTypedClient client; diff --git a/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.Async.cs b/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.Async.cs new file mode 100644 index 00000000..0d0ba19b --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.Async.cs @@ -0,0 +1,109 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + internal partial class RedisClientSet + : IRedisSetAsync + { + IRedisTypedClientAsync AsyncClient => client; + + ValueTask IRedisSetAsync.AddAsync(T value, CancellationToken token) + => AsyncClient.AddItemToSetAsync(this, value, token); + + IRedisSetAsync AsAsync() => this; + + ValueTask IRedisSetAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveEntryAsync(setId, token).Await(); + + ValueTask IRedisSetAsync.ContainsAsync(T item, CancellationToken token) + => AsyncClient.SetContainsItemAsync(this, item, token); + + ValueTask IRedisSetAsync.CountAsync(CancellationToken token) + => AsyncClient.GetSetCountAsync(this, token).AsInt32(); + + ValueTask> IRedisSetAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSetAsync(this, token); + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + var count = await AsAsync().CountAsync(token).ConfigureAwait(false); + if (count <= PageLimit) + { + var all = await AsyncClient.GetAllItemsFromSetAsync(this, token).ConfigureAwait(false); + foreach (var item in all) + { + yield return item; + } + } + else + { + // from GetPagingEnumerator + var skip = 0; + List pageResults; + do + { + pageResults = await AsyncClient.GetSortedEntryValuesAsync(this, skip, skip + PageLimit - 1, token).ConfigureAwait(false); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + } + + ValueTask IRedisSetAsync.GetDifferencesAsync(IRedisSetAsync[] withSets, CancellationToken token) + => AsyncClient.StoreUnionFromSetsAsync(this, withSets, token); + + ValueTask IRedisSetAsync.GetDifferencesAsync(params IRedisSetAsync[] withSets) + => AsAsync().GetDifferencesAsync(withSets, token: default); + + ValueTask IRedisSetAsync.GetRandomItemAsync(CancellationToken token) + => AsyncClient.GetRandomItemFromSetAsync(this, token); + + ValueTask IRedisSetAsync.MoveToAsync(T item, IRedisSetAsync toSet, CancellationToken token) + => AsyncClient.MoveBetweenSetsAsync(this, toSet, item, token); + + ValueTask IRedisSetAsync.PopRandomItemAsync(CancellationToken token) + => AsyncClient.PopItemFromSetAsync(this, token); + + ValueTask IRedisSetAsync.PopulateWithDifferencesOfAsync(IRedisSetAsync fromSet, IRedisSetAsync[] withSets, CancellationToken token) + => AsyncClient.StoreDifferencesFromSetAsync(this, fromSet, withSets, token); + + ValueTask IRedisSetAsync.PopulateWithDifferencesOfAsync(IRedisSetAsync fromSet, params IRedisSetAsync[] withSets) + => 
AsAsync().PopulateWithDifferencesOfAsync(fromSet, withSets, token: default); + + ValueTask IRedisSetAsync.PopulateWithIntersectOfAsync(IRedisSetAsync[] sets, CancellationToken token) + => AsyncClient.StoreIntersectFromSetsAsync(this, sets, token); + + ValueTask IRedisSetAsync.PopulateWithIntersectOfAsync(params IRedisSetAsync[] sets) + => AsAsync().PopulateWithIntersectOfAsync(sets, token: default); + + ValueTask IRedisSetAsync.PopulateWithUnionOfAsync(IRedisSetAsync[] sets, CancellationToken token) + => AsyncClient.StoreUnionFromSetsAsync(this, sets, token); + + ValueTask IRedisSetAsync.PopulateWithUnionOfAsync(params IRedisSetAsync[] sets) + => AsAsync().PopulateWithUnionOfAsync(sets, token: default); + + ValueTask IRedisSetAsync.RemoveAsync(T value, CancellationToken token) + => AsyncClient.RemoveItemFromSetAsync(this, value, token).AwaitAsTrue(); // see Remove for why "true" + + ValueTask> IRedisSetAsync.SortAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetSortedEntryValuesAsync(this, startingFrom, endingAt, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs b/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs index c6a2514a..5c48dd6a 100644 --- a/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs +++ b/src/ServiceStack.Redis/Generic/RedisClientSet.Generic.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -19,7 +19,7 @@ namespace ServiceStack.Redis.Generic /// /// Wrap the common redis set operations under a ICollection[string] interface. /// - internal class RedisClientSet + internal partial class RedisClientSet : IRedisSet { private readonly RedisTypedClient client; diff --git a/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.Async.cs b/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.Async.cs new file mode 100644 index 00000000..fae91472 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.Async.cs @@ -0,0 +1,136 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
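The set wrapper follows the same shape via `IRedisSetAsync<T>`, with the populate methods mapping to server-side store operations (`SINTERSTORE`, `SUNIONSTORE`, `SDIFFSTORE`). A minimal sketch; imports and how the three set instances are obtained are assumptions:

```csharp
using ServiceStack.Redis.Generic;
using System.Threading;
using System.Threading.Tasks;

class SetExample
{
    public static async Task UseAsync(IRedisSetAsync<string> all, IRedisSetAsync<string> online,
        IRedisSetAsync<string> result, CancellationToken token)
    {
        await all.AddAsync("alice", token);
        await all.AddAsync("bob", token);
        await online.AddAsync("alice", token);

        // stores the intersection of `all` and `online` into `result` on the server
        await result.PopulateWithIntersectOfAsync(new[] { all, online }, token);

        var members = await result.GetAllAsync(token);
        var random = await result.GetRandomItemAsync(token);
    }
}
```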
+// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + internal partial class RedisClientSortedSet + : IRedisSortedSetAsync + { + IRedisTypedClientAsync AsyncClient => client; + + IRedisSortedSetAsync AsAsync() => this; + + ValueTask IRedisSortedSetAsync.AddAsync(T item, double score, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(this, item, score, token); + + ValueTask IRedisSortedSetAsync.CountAsync(CancellationToken token) + => AsyncClient.GetSortedSetCountAsync(this, token).AsInt32(); + + ValueTask> IRedisSortedSetAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetAsync(this, token); + + ValueTask> IRedisSortedSetAsync.GetAllDescendingAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetDescAsync(this, token); + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + var count = await AsAsync().CountAsync(token).ConfigureAwait(false); + if (count <= PageLimit) + { + var all = await AsyncClient.GetAllItemsFromSortedSetAsync(this, token).ConfigureAwait(false); + foreach (var item in all) + { + yield return item; + } + } + else + { + // from GetPagingEnumerator(); + var skip = 0; + List pageResults; + do + { + pageResults = await AsyncClient.GetRangeFromSortedSetAsync(this, skip, skip + PageLimit - 1, token).ConfigureAwait(false); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + } + + ValueTask IRedisSortedSetAsync.GetItemScoreAsync(T item, CancellationToken token) + => AsyncClient.GetItemScoreInSortedSetAsync(this, item, token); + + ValueTask> IRedisSortedSetAsync.GetRangeAsync(int fromRank, int toRank, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetAsync(this, fromRank, toRank, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByHighestScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(this, fromScore, toScore, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByHighestScoreAsync(double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(this, fromScore, toScore, skip, take, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByLowestScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(this, fromScore, toScore, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByLowestScoreAsync(double fromScore, double toScore, int? skip, int? 
take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(this, fromScore, toScore, skip, take, token); + + ValueTask IRedisSortedSetAsync.IncrementItemAsync(T item, double incrementBy, CancellationToken token) + => AsyncClient.IncrementItemInSortedSetAsync(this, item, incrementBy, token); + + ValueTask IRedisSortedSetAsync.IndexOfAsync(T item, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetAsync(this, item, token).AsInt32(); + + ValueTask IRedisSortedSetAsync.IndexOfDescendingAsync(T item, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetDescAsync(this, item, token); + + ValueTask IRedisSortedSetAsync.PopItemWithHighestScoreAsync(CancellationToken token) + => AsyncClient.PopItemWithHighestScoreFromSortedSetAsync(this, token); + + ValueTask IRedisSortedSetAsync.PopItemWithLowestScoreAsync(CancellationToken token) + => AsyncClient.PopItemWithLowestScoreFromSortedSetAsync(this, token); + + ValueTask IRedisSortedSetAsync.PopulateWithIntersectOfAsync(IRedisSortedSetAsync[] setIds, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(this, setIds, token); + + ValueTask IRedisSortedSetAsync.PopulateWithIntersectOfAsync(IRedisSortedSetAsync[] setIds, string[] args, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(this, setIds, args, token); + + ValueTask IRedisSortedSetAsync.PopulateWithUnionOfAsync(IRedisSortedSetAsync[] setIds, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(this, setIds, token); + + ValueTask IRedisSortedSetAsync.PopulateWithUnionOfAsync(IRedisSortedSetAsync[] setIds, string[] args, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(this, setIds, args, token); + + ValueTask IRedisSortedSetAsync.RemoveRangeAsync(int minRank, int maxRank, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetAsync(this, minRank, maxRank, token); + + ValueTask IRedisSortedSetAsync.RemoveRangeByScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetByScoreAsync(this, fromScore, toScore, token); + + ValueTask IRedisSortedSetAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveEntryAsync(setId, token).Await(); + + ValueTask IRedisSortedSetAsync.ContainsAsync(T value, CancellationToken token) + => AsyncClient.SortedSetContainsItemAsync(this, value, token); + + ValueTask IRedisSortedSetAsync.AddAsync(T value, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(this, value, token); + + ValueTask IRedisSortedSetAsync.RemoveAsync(T value, CancellationToken token) + => AsyncClient.RemoveItemFromSortedSetAsync(this, value, token).AwaitAsTrue(); // see Remove for why "true" + + ValueTask IRedisSortedSetAsync.PopulateWithIntersectOfAsync(params IRedisSortedSetAsync[] setIds) + => AsAsync().PopulateWithIntersectOfAsync(setIds, token: default); + + ValueTask IRedisSortedSetAsync.PopulateWithUnionOfAsync(params IRedisSortedSetAsync[] setIds) + => AsAsync().PopulateWithUnionOfAsync(setIds, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs b/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs index bda2f1eb..5e7fdf13 100644 --- a/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs +++ b/src/ServiceStack.Redis/Generic/RedisClientSortedSet.Generic.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. 
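Finally, the sorted-set wrapper is consumed through `IRedisSortedSetAsync<T>`, which adds scores, ranks and range queries on top of the plain set operations. A minimal sketch; imports and the source of the instance are assumptions:

```csharp
using ServiceStack.Redis.Generic;
using System.Threading;
using System.Threading.Tasks;

class SortedSetExample
{
    public static async Task UseAsync(IRedisSortedSetAsync<string> leaderboard, CancellationToken token)
    {
        await leaderboard.AddAsync("alice", 100, token);
        await leaderboard.AddAsync("bob", 85, token);
        await leaderboard.IncrementItemAsync("bob", 10, token); // bob is now 95

        var bobScore = await leaderboard.GetItemScoreAsync("bob", token);
        var top10 = await leaderboard.GetRangeAsync(0, 9, token);                              // by rank
        var over90 = await leaderboard.GetRangeByHighestScoreAsync(90, double.MaxValue, token); // by score
        var winner = await leaderboard.PopItemWithHighestScoreAsync(token);
    }
}
```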
All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -19,7 +19,7 @@ namespace ServiceStack.Redis.Generic /// /// Wrap the common redis set operations under a ICollection[string] interface. /// - internal class RedisClientSortedSet + internal partial class RedisClientSortedSet : IRedisSortedSet { private readonly RedisTypedClient client; diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient.Async.cs new file mode 100644 index 00000000..01018721 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient.Async.cs @@ -0,0 +1,765 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Data; +using ServiceStack.Model; +using ServiceStack.Redis.Internal; +using ServiceStack.Text; +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + partial class RedisTypedClient + : IRedisTypedClientAsync + { + public IRedisTypedClientAsync AsAsync() => this; + + private IRedisClientAsync AsyncClient => client; + private IRedisNativeClientAsync AsyncNative => client; + + IRedisSetAsync IRedisTypedClientAsync.TypeIdsSet => TypeIdsSetRaw; + + IRedisClientAsync IRedisTypedClientAsync.RedisClient => client; + + internal ValueTask ExpectQueuedAsync(CancellationToken token) + => client.ExpectQueuedAsync(token); + + internal ValueTask ExpectOkAsync(CancellationToken token) + => client.ExpectOkAsync(token); + + internal ValueTask ReadMultiDataResultCountAsync(CancellationToken token) + => client.ReadMultiDataResultCountAsync(token); + + ValueTask IRedisTypedClientAsync.GetValueAsync(string key, CancellationToken token) + => DeserializeValueAsync(AsyncNative.GetAsync(key, token)); + + async ValueTask IRedisTypedClientAsync.SetValueAsync(string key, T entity, CancellationToken token) + { + AssertNotNull(key); + await AsyncClient.SetAsync(key, SerializeValue(entity), token).ConfigureAwait(false); + await client.RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + } + + Task IEntityStoreAsync.GetByIdAsync(object id, CancellationToken token) + { + var key = client.UrnKey(id); + return AsAsync().GetValueAsync(key, token).AsTask(); + } + + internal ValueTask FlushSendBufferAsync(CancellationToken token) + => client.FlushSendBufferAsync(token); + + internal ValueTask AddTypeIdsRegisteredDuringPipelineAsync(CancellationToken token) + => client.AddTypeIdsRegisteredDuringPipelineAsync(token); + + async Task> IEntityStoreAsync.GetByIdsAsync(IEnumerable ids, CancellationToken token) + { + if (ids != null) + { + var urnKeys = ids.Map(x => client.UrnKey(x)); + if (urnKeys.Count != 0) + return await AsAsync().GetValuesAsync(urnKeys, token).ConfigureAwait(false); + } + + return new List(); + } + + async Task> IEntityStoreAsync.GetAllAsync(CancellationToken token) + { + var allKeys = await AsyncClient.GetAllItemsFromSetAsync(this.TypeIdsSetKey, token).ConfigureAwait(false); + return await AsAsync().GetByIdsAsync(allKeys.ToArray(), token).ConfigureAwait(false); + } + + async 
Task IEntityStoreAsync.StoreAsync(T entity, CancellationToken token) + { + var urnKey = client.UrnKey(entity); + await AsAsync().SetValueAsync(urnKey, entity, token).ConfigureAwait(false); + return entity; + } + + async Task IEntityStoreAsync.StoreAllAsync(IEnumerable entities, CancellationToken token) + { + if (PrepareStoreAll(entities, out var keys, out var values, out var entitiesList)) + { + await AsyncNative.MSetAsync(keys, values, token).ConfigureAwait(false); + await client.RegisterTypeIdsAsync(entitiesList, token).ConfigureAwait(false); + } + } + + async Task IEntityStoreAsync.DeleteAsync(T entity, CancellationToken token) + { + var urnKey = client.UrnKey(entity); + await AsyncClient.RemoveEntryAsync(new[] { urnKey }, token).ConfigureAwait(false); + await client.RemoveTypeIdsByValueAsync(entity, token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteByIdAsync(object id, CancellationToken token) + { + var urnKey = client.UrnKey(id); + + await AsyncClient.RemoveEntryAsync(new[] { urnKey }, token).ConfigureAwait(false); + await client.RemoveTypeIdsByIdAsync(id.ToString(), token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteByIdsAsync(IEnumerable ids, CancellationToken token) + { + if (ids == null) return; + + var idStrings = ids.Cast().Select(x => x.ToString()).ToArray(); + var urnKeys = idStrings.Select(t => client.UrnKey(t)).ToArray(); + if (urnKeys.Length > 0) + { + await AsyncClient.RemoveEntryAsync(urnKeys, token).ConfigureAwait(false); + await client.RemoveTypeIdsByIdsAsync(idStrings, token).ConfigureAwait(false); + } + } + + async Task IEntityStoreAsync.DeleteAllAsync(CancellationToken token) + { + await DeleteAllAsync(0,RedisConfig.CommandKeysBatchSize, token).ConfigureAwait(false); + } + + private async Task DeleteAllAsync(ulong cursor, int batchSize, CancellationToken token) + { + do + { + var scanResult = await AsyncNative.SScanAsync(this.TypeIdsSetKey, cursor, batchSize, token: token).ConfigureAwait(false); + cursor = scanResult.Cursor; + var urnKeys = scanResult.Results.Select(x => client.UrnKey(Encoding.UTF8.GetString(x))).ToArray(); + if (urnKeys.Length > 0) + { + await AsyncClient.RemoveEntryAsync(urnKeys, token).ConfigureAwait(false); + } + } while (cursor != 0); + await AsyncClient.RemoveEntryAsync(new[] { this.TypeIdsSetKey }, token).ConfigureAwait(false); + } + + async ValueTask> IRedisTypedClientAsync.GetValuesAsync(List keys, CancellationToken token) + { + if (keys.IsNullOrEmpty()) return new List(); + + var resultBytesArray = await AsyncNative.MGetAsync(keys.ToArray(), token).ConfigureAwait(false); + return ProcessGetValues(resultBytesArray); + } + + ValueTask> IRedisTypedClientAsync.CreateTransactionAsync(CancellationToken token) + { + IRedisTypedTransactionAsync obj = new RedisTypedTransaction(this, true); + return obj.AsValueTaskResult(); + } + + IRedisTypedPipelineAsync IRedisTypedClientAsync.CreatePipeline() + => new RedisTypedPipeline(this); + + + ValueTask IRedisTypedClientAsync.AcquireLockAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.AcquireLockAsync(this.TypeLockKey, timeOut, token); + + long IRedisTypedClientAsync.Db => AsyncClient.Db; + + IHasNamed> IRedisTypedClientAsync.Lists => Lists as IHasNamed> ?? throw new NotSupportedException("The provided Lists does not support IRedisListAsync"); + IHasNamed> IRedisTypedClientAsync.Sets => Sets as IHasNamed> ?? 
throw new NotSupportedException("The provided Sets does not support IRedisSetAsync"); + IHasNamed> IRedisTypedClientAsync.SortedSets => SortedSets as IHasNamed> ?? throw new NotSupportedException("The provided SortedSets does not support IRedisSortedSetAsync"); + + IRedisHashAsync IRedisTypedClientAsync.GetHash(string hashId) => GetHash(hashId) as IRedisHashAsync ?? throw new NotSupportedException("The provided Hash does not support IRedisHashAsync"); + + ValueTask IRedisTypedClientAsync.SelectAsync(long db, CancellationToken token) + => AsyncClient.SelectAsync(db, token); + + ValueTask> IRedisTypedClientAsync.GetAllKeysAsync(CancellationToken token) + => AsyncClient.GetAllKeysAsync(token); + + ValueTask IRedisTypedClientAsync.SetSequenceAsync(int value, CancellationToken token) + => AsyncNative.GetSetAsync(SequenceKey, Encoding.UTF8.GetBytes(value.ToString()), token).Await(); + + ValueTask IRedisTypedClientAsync.GetNextSequenceAsync(CancellationToken token) + => AsAsync().IncrementValueAsync(SequenceKey, token); + + ValueTask IRedisTypedClientAsync.GetNextSequenceAsync(int incrBy, CancellationToken token) + => AsAsync().IncrementValueByAsync(SequenceKey, incrBy, token); + + ValueTask IRedisTypedClientAsync.GetEntryTypeAsync(string key, CancellationToken token) + => AsyncClient.GetEntryTypeAsync(key, token); + + ValueTask IRedisTypedClientAsync.GetRandomKeyAsync(CancellationToken token) + => AsyncClient.GetRandomKeyAsync(token); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void AssertNotNull(object obj, string name = "key") + { + if (obj is null) Throw(name); + static void Throw(string name) => throw new ArgumentNullException(name); + } + + async ValueTask IRedisTypedClientAsync.SetValueAsync(string key, T entity, TimeSpan expireIn, CancellationToken token) + { + AssertNotNull(key); + await AsyncClient.SetAsync(key, SerializeValue(entity), expireIn, token).ConfigureAwait(false); + await client.RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + } + + async ValueTask IRedisTypedClientAsync.SetValueIfNotExistsAsync(string key, T entity, CancellationToken token) + { + var success = await AsyncNative.SetNXAsync(key, SerializeValue(entity), token).IsSuccessAsync().ConfigureAwait(false); + if (success) await client.RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + return success; + } + + async ValueTask IRedisTypedClientAsync.SetValueIfExistsAsync(string key, T entity, CancellationToken token) + { + var success = await AsyncNative.SetAsync(key, SerializeValue(entity), exists: true, token: token).ConfigureAwait(false); + if (success) await client.RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + return success; + } + + async ValueTask IRedisTypedClientAsync.StoreAsync(T entity, TimeSpan expireIn, CancellationToken token) + { + var urnKey = client.UrnKey(entity); + await AsAsync().SetValueAsync(urnKey, entity, expireIn, token).ConfigureAwait(false); + return entity; + } + + ValueTask IRedisTypedClientAsync.GetAndSetValueAsync(string key, T value, CancellationToken token) + => DeserializeValueAsync(AsyncNative.GetSetAsync(key, SerializeValue(value), token)); + + ValueTask IRedisTypedClientAsync.ContainsKeyAsync(string key, CancellationToken token) + => AsyncNative.ExistsAsync(key, token).IsSuccessAsync(); + + ValueTask IRedisTypedClientAsync.RemoveEntryAsync(string key, CancellationToken token) + => AsyncNative.DelAsync(key, token).IsSuccessAsync(); + + ValueTask IRedisTypedClientAsync.RemoveEntryAsync(string[] keys, CancellationToken token) + => 
AsyncNative.DelAsync(keys, token).IsSuccessAsync(); + + async ValueTask IRedisTypedClientAsync.RemoveEntryAsync(IHasStringId[] entities, CancellationToken token) + { + var ids = entities.Select(x => x.Id).ToArray(); + var success = await AsyncNative.DelAsync(ids, token).IsSuccessAsync().ConfigureAwait(false); + if (success) await client.RemoveTypeIdsByValuesAsync(ids, token).ConfigureAwait(false); + return success; + } + + ValueTask IRedisTypedClientAsync.IncrementValueAsync(string key, CancellationToken token) + => AsyncNative.IncrAsync(key, token); + + ValueTask IRedisTypedClientAsync.IncrementValueByAsync(string key, int count, CancellationToken token) + => AsyncNative.IncrByAsync(key, count, token); + + ValueTask IRedisTypedClientAsync.DecrementValueAsync(string key, CancellationToken token) + => AsyncNative.DecrAsync(key, token); + + ValueTask IRedisTypedClientAsync.DecrementValueByAsync(string key, int count, CancellationToken token) + => AsyncNative.DecrByAsync(key, count, token); + + ValueTask IRedisTypedClientAsync.ExpireInAsync(object id, TimeSpan expiresIn, CancellationToken token) + { + var key = client.UrnKey(id); + return AsyncClient.ExpireEntryInAsync(key, expiresIn, token); + } + + ValueTask IRedisTypedClientAsync.ExpireAtAsync(object id, DateTime expireAt, CancellationToken token) + { + var key = client.UrnKey(id); + return AsyncClient.ExpireEntryAtAsync(key, expireAt, token); + } + + ValueTask IRedisTypedClientAsync.ExpireEntryInAsync(string key, TimeSpan expireIn, CancellationToken token) + => AsyncClient.ExpireEntryInAsync(key, expireIn, token); + + ValueTask IRedisTypedClientAsync.ExpireEntryAtAsync(string key, DateTime expireAt, CancellationToken token) + => AsyncClient.ExpireEntryAtAsync(key, expireAt, token); + + async ValueTask IRedisTypedClientAsync.GetTimeToLiveAsync(string key, CancellationToken token) + => TimeSpan.FromSeconds(await AsyncNative.TtlAsync(key, token).ConfigureAwait(false)); + + ValueTask IRedisTypedClientAsync.ForegroundSaveAsync(CancellationToken token) + => AsyncClient.ForegroundSaveAsync(token); + + ValueTask IRedisTypedClientAsync.BackgroundSaveAsync(CancellationToken token) + => AsyncClient.BackgroundSaveAsync(token); + + ValueTask IRedisTypedClientAsync.FlushDbAsync(CancellationToken token) + => AsyncClient.FlushDbAsync(token); + + ValueTask IRedisTypedClientAsync.FlushAllAsync(CancellationToken token) + => new ValueTask(AsyncClient.FlushAllAsync(token)); + + async ValueTask IRedisTypedClientAsync.SearchKeysAsync(string pattern, CancellationToken token) + { + var strKeys = await AsyncClient.SearchKeysAsync(pattern, token).ConfigureAwait(false); + return SearchKeysParse(strKeys); + } + + private ValueTask> CreateList(ValueTask pending) + { + return pending.IsCompletedSuccessfully ? CreateList(pending.Result).AsValueTaskResult() : Awaited(this, pending); + static async ValueTask> Awaited(RedisTypedClient obj, ValueTask pending) + => obj.CreateList(await pending.ConfigureAwait(false)); + } + private ValueTask DeserializeValueAsync(ValueTask pending) + { + return pending.IsCompletedSuccessfully ? DeserializeValue(pending.Result).AsValueTaskResult() : Awaited(this, pending); + static async ValueTask Awaited(RedisTypedClient obj, ValueTask pending) + => obj.DeserializeValue(await pending.ConfigureAwait(false)); + } + + private static ValueTask DeserializeFromStringAsync(ValueTask pending) + { + return pending.IsCompletedSuccessfully ? 
DeserializeFromString(pending.Result).AsValueTaskResult() : Awaited(pending); + static async ValueTask Awaited(ValueTask pending) + => DeserializeFromString(await pending.ConfigureAwait(false)); + } + + private static ValueTask> CreateGenericMapAsync(ValueTask> pending) + { + return pending.IsCompletedSuccessfully ? CreateGenericMap(pending.Result).AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask> pending) + => CreateGenericMap(await pending.ConfigureAwait(false)); + } + + private static ValueTask> ConvertEachToAsync(ValueTask> pending) + { + return pending.IsCompletedSuccessfully ? ConvertEachTo(pending.Result).AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask> pending) + => ConvertEachTo(await pending.ConfigureAwait(false)); + } + + ValueTask> IRedisTypedClientAsync.GetSortedEntryValuesAsync(IRedisSetAsync fromSet, int startingFrom, int endingAt, CancellationToken token) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; + var multiDataList = AsyncNative.SortAsync(fromSet.Id, sortOptions, token); + return CreateList(multiDataList); + } + + ValueTask IRedisTypedClientAsync.StoreAsHashAsync(T entity, CancellationToken token) + => AsyncClient.StoreAsHashAsync(entity, token); + + ValueTask IRedisTypedClientAsync.GetFromHashAsync(object id, CancellationToken token) + => AsyncClient.GetFromHashAsync(id, token); + + async ValueTask> IRedisTypedClientAsync.GetAllItemsFromSetAsync(IRedisSetAsync fromSet, CancellationToken token) + { + var multiDataList = await AsyncNative.SMembersAsync(fromSet.Id, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisTypedClientAsync.AddItemToSetAsync(IRedisSetAsync toSet, T item, CancellationToken token) + => AsyncNative.SAddAsync(toSet.Id, SerializeValue(item), token).Await(); + + ValueTask IRedisTypedClientAsync.RemoveItemFromSetAsync(IRedisSetAsync fromSet, T item, CancellationToken token) + => AsyncNative.SRemAsync(fromSet.Id, SerializeValue(item), token).Await(); + + ValueTask IRedisTypedClientAsync.PopItemFromSetAsync(IRedisSetAsync fromSet, CancellationToken token) + => DeserializeValueAsync(AsyncNative.SPopAsync(fromSet.Id, token)); + + ValueTask IRedisTypedClientAsync.MoveBetweenSetsAsync(IRedisSetAsync fromSet, IRedisSetAsync toSet, T item, CancellationToken token) + => AsyncNative.SMoveAsync(fromSet.Id, toSet.Id, SerializeValue(item), token); + + ValueTask IRedisTypedClientAsync.GetSetCountAsync(IRedisSetAsync set, CancellationToken token) + => AsyncNative.SCardAsync(set.Id, token); + + ValueTask IRedisTypedClientAsync.SetContainsItemAsync(IRedisSetAsync set, T item, CancellationToken token) + => AsyncNative.SIsMemberAsync(set.Id, SerializeValue(item), token).IsSuccessAsync(); + + async ValueTask> IRedisTypedClientAsync.GetIntersectFromSetsAsync(IRedisSetAsync[] sets, CancellationToken token) + { + var multiDataList = await AsyncNative.SInterAsync(sets.Map(x => x.Id).ToArray(), token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSetsAsync(IRedisSetAsync intoSet, IRedisSetAsync[] sets, CancellationToken token) + => AsyncNative.SInterStoreAsync(intoSet.Id, sets.Map(x => x.Id).ToArray(), token); + + async ValueTask> IRedisTypedClientAsync.GetUnionFromSetsAsync(IRedisSetAsync[] sets, CancellationToken token) + { + var multiDataList = await AsyncNative.SUnionAsync(sets.Map(x => x.Id).ToArray(), token).ConfigureAwait(false); + return 
CreateHashSet(multiDataList); + } + + ValueTask IRedisTypedClientAsync.StoreUnionFromSetsAsync(IRedisSetAsync intoSet, IRedisSetAsync[] sets, CancellationToken token) + => AsyncNative.SUnionStoreAsync(intoSet.Id, sets.Map(x => x.Id).ToArray(), token); + + async ValueTask> IRedisTypedClientAsync.GetDifferencesFromSetAsync(IRedisSetAsync fromSet, IRedisSetAsync[] withSets, CancellationToken token) + { + var multiDataList = await AsyncNative.SDiffAsync(fromSet.Id, withSets.Map(x => x.Id).ToArray(), token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisTypedClientAsync.StoreDifferencesFromSetAsync(IRedisSetAsync intoSet, IRedisSetAsync fromSet, IRedisSetAsync[] withSets, CancellationToken token) + => AsyncNative.SDiffStoreAsync(intoSet.Id, fromSet.Id, withSets.Map(x => x.Id).ToArray(), token); + + ValueTask IRedisTypedClientAsync.GetRandomItemFromSetAsync(IRedisSetAsync fromSet, CancellationToken token) + => DeserializeValueAsync(AsyncNative.SRandMemberAsync(fromSet.Id, token)); + + ValueTask> IRedisTypedClientAsync.GetAllItemsFromListAsync(IRedisListAsync fromList, CancellationToken token) + { + var multiDataList = AsyncNative.LRangeAsync(fromList.Id, FirstElement, LastElement, token); + return CreateList(multiDataList); + } + + ValueTask> IRedisTypedClientAsync.GetRangeFromListAsync(IRedisListAsync fromList, int startingFrom, int endingAt, CancellationToken token) + { + var multiDataList = AsyncNative.LRangeAsync(fromList.Id, startingFrom, endingAt, token); + return CreateList(multiDataList); + } + + ValueTask> IRedisTypedClientAsync.SortListAsync(IRedisListAsync fromList, int startingFrom, int endingAt, CancellationToken token) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; + var multiDataList = AsyncNative.SortAsync(fromList.Id, sortOptions, token); + return CreateList(multiDataList); + } + + ValueTask IRedisTypedClientAsync.AddItemToListAsync(IRedisListAsync fromList, T value, CancellationToken token) + => AsyncNative.RPushAsync(fromList.Id, SerializeValue(value), token).Await(); + + ValueTask IRedisTypedClientAsync.PrependItemToListAsync(IRedisListAsync fromList, T value, CancellationToken token) + => AsyncNative.LPushAsync(fromList.Id, SerializeValue(value), token).Await(); + + ValueTask IRedisTypedClientAsync.RemoveStartFromListAsync(IRedisListAsync fromList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.LPopAsync(fromList.Id, token)); + + async ValueTask IRedisTypedClientAsync.BlockingRemoveStartFromListAsync(IRedisListAsync fromList, TimeSpan? timeOut, CancellationToken token) + { + var unblockingKeyAndValue = await AsyncNative.BLPopAsync(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + return unblockingKeyAndValue.Length == 0 + ? 
default + : DeserializeValue(unblockingKeyAndValue[1]); + } + + ValueTask IRedisTypedClientAsync.RemoveEndFromListAsync(IRedisListAsync fromList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.RPopAsync(fromList.Id, token)); + + ValueTask IRedisTypedClientAsync.RemoveAllFromListAsync(IRedisListAsync fromList, CancellationToken token) + => AsyncNative.LTrimAsync(fromList.Id, int.MaxValue, FirstElement, token); + + ValueTask IRedisTypedClientAsync.TrimListAsync(IRedisListAsync fromList, int keepStartingFrom, int keepEndingAt, CancellationToken token) + => AsyncNative.LTrimAsync(fromList.Id, keepStartingFrom, keepEndingAt, token); + + ValueTask IRedisTypedClientAsync.RemoveItemFromListAsync(IRedisListAsync fromList, T value, CancellationToken token) + { + const int removeAll = 0; + return AsyncNative.LRemAsync(fromList.Id, removeAll, SerializeValue(value), token); + } + + ValueTask IRedisTypedClientAsync.RemoveItemFromListAsync(IRedisListAsync fromList, T value, int noOfMatches, CancellationToken token) + => AsyncNative.LRemAsync(fromList.Id, noOfMatches, SerializeValue(value), token); + + ValueTask IRedisTypedClientAsync.GetListCountAsync(IRedisListAsync fromList, CancellationToken token) + => AsyncNative.LLenAsync(fromList.Id, token); + + ValueTask IRedisTypedClientAsync.GetItemFromListAsync(IRedisListAsync fromList, int listIndex, CancellationToken token) + => DeserializeValueAsync(AsyncNative.LIndexAsync(fromList.Id, listIndex, token)); + + ValueTask IRedisTypedClientAsync.SetItemInListAsync(IRedisListAsync toList, int listIndex, T value, CancellationToken token) + => AsyncNative.LSetAsync(toList.Id, listIndex, SerializeValue(value), token); + + ValueTask IRedisTypedClientAsync.InsertBeforeItemInListAsync(IRedisListAsync toList, T pivot, T value, CancellationToken token) + => AsyncNative.LInsertAsync(toList.Id, insertBefore: true, pivot: SerializeValue(pivot), value: SerializeValue(value), token: token); + + ValueTask IRedisTypedClientAsync.InsertAfterItemInListAsync(IRedisListAsync toList, T pivot, T value, CancellationToken token) + => AsyncNative.LInsertAsync(toList.Id, insertBefore: false, pivot: SerializeValue(pivot), value: SerializeValue(value), token: token); + + ValueTask IRedisTypedClientAsync.EnqueueItemOnListAsync(IRedisListAsync fromList, T item, CancellationToken token) + => AsyncNative.LPushAsync(fromList.Id, SerializeValue(item), token).Await(); + + ValueTask IRedisTypedClientAsync.DequeueItemFromListAsync(IRedisListAsync fromList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.RPopAsync(fromList.Id, token)); + + async ValueTask IRedisTypedClientAsync.BlockingDequeueItemFromListAsync(IRedisListAsync fromList, TimeSpan? timeOut, CancellationToken token) + { + var unblockingKeyAndValue = await AsyncNative.BRPopAsync(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + return unblockingKeyAndValue.Length == 0 + ? default + : DeserializeValue(unblockingKeyAndValue[1]); + } + + ValueTask IRedisTypedClientAsync.PushItemToListAsync(IRedisListAsync fromList, T item, CancellationToken token) + => AsyncNative.RPushAsync(fromList.Id, SerializeValue(item), token).Await(); + + ValueTask IRedisTypedClientAsync.PopItemFromListAsync(IRedisListAsync fromList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.RPopAsync(fromList.Id, token)); + + async ValueTask IRedisTypedClientAsync.BlockingPopItemFromListAsync(IRedisListAsync fromList, TimeSpan? 
timeOut, CancellationToken token) + { + var unblockingKeyAndValue = await AsyncNative.BRPopAsync(fromList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + return unblockingKeyAndValue.Length == 0 + ? default + : DeserializeValue(unblockingKeyAndValue[1]); + } + + ValueTask IRedisTypedClientAsync.PopAndPushItemBetweenListsAsync(IRedisListAsync fromList, IRedisListAsync toList, CancellationToken token) + => DeserializeValueAsync(AsyncNative.RPopLPushAsync(fromList.Id, toList.Id, token)); + + ValueTask IRedisTypedClientAsync.BlockingPopAndPushItemBetweenListsAsync(IRedisListAsync fromList, IRedisListAsync toList, TimeSpan? timeOut, CancellationToken token) + => DeserializeValueAsync(AsyncNative.BRPopLPushAsync(fromList.Id, toList.Id, (int)timeOut.GetValueOrDefault().TotalSeconds, token)); + + ValueTask IRedisTypedClientAsync.AddItemToSortedSetAsync(IRedisSortedSetAsync toSet, T value, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(toSet.Id, value.SerializeToString(), token).Await(); + + ValueTask IRedisTypedClientAsync.AddItemToSortedSetAsync(IRedisSortedSetAsync toSet, T value, double score, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(toSet.Id, value.SerializeToString(), score, token).Await(); + + ValueTask IRedisTypedClientAsync.RemoveItemFromSortedSetAsync(IRedisSortedSetAsync fromSet, T value, CancellationToken token) + => AsyncClient.RemoveItemFromSortedSetAsync(fromSet.Id, value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.PopItemWithLowestScoreFromSortedSetAsync(IRedisSortedSetAsync fromSet, CancellationToken token) + => DeserializeFromStringAsync(AsyncClient.PopItemWithLowestScoreFromSortedSetAsync(fromSet.Id, token)); + + ValueTask IRedisTypedClientAsync.PopItemWithHighestScoreFromSortedSetAsync(IRedisSortedSetAsync fromSet, CancellationToken token) + => DeserializeFromStringAsync(AsyncClient.PopItemWithHighestScoreFromSortedSetAsync(fromSet.Id, token)); + + ValueTask IRedisTypedClientAsync.SortedSetContainsItemAsync(IRedisSortedSetAsync set, T value, CancellationToken token) + => AsyncClient.SortedSetContainsItemAsync(set.Id, value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.IncrementItemInSortedSetAsync(IRedisSortedSetAsync set, T value, double incrementBy, CancellationToken token) + => AsyncClient.IncrementItemInSortedSetAsync(set.Id, value.SerializeToString(), incrementBy, token); + + ValueTask IRedisTypedClientAsync.GetItemIndexInSortedSetAsync(IRedisSortedSetAsync set, T value, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetAsync(set.Id, value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.GetItemIndexInSortedSetDescAsync(IRedisSortedSetAsync set, T value, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetDescAsync(set.Id, value.SerializeToString(), token); + + ValueTask> IRedisTypedClientAsync.GetAllItemsFromSortedSetAsync(IRedisSortedSetAsync set, CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetAsync(set.Id, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetAllItemsFromSortedSetDescAsync(IRedisSortedSetAsync set, CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetDescAsync(set.Id, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetAsync(IRedisSortedSetAsync set, int fromRank, int toRank, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetAsync(set.Id, fromRank, toRank, token).ConvertEachToAsync(); + + 
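For illustration, a minimal sketch of how the typed sorted-set members being implemented here might be called. It assumes the typed async client is obtained via IRedisClientAsync.As<T>(), that the CancellationToken parameters carry defaults, and the Article type and key names are made up for the example; none of these assumptions are taken from this changeset.

using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

public class Article { public long Id { get; set; } public string Title { get; set; } }

public static class SortedSetSketch
{
    public static async Task RunAsync(IRedisClientAsync redisAsync)
    {
        var redis = redisAsync.As<Article>();                 // assumed: typed async client
        var ranked = redis.SortedSets["urn:article:ranked"];  // illustrative sorted-set key

        // the score argument determines ordering inside the sorted set
        await redis.AddItemToSortedSetAsync(ranked, new Article { Id = 1, Title = "intro" }, score: 10);
        await redis.AddItemToSortedSetAsync(ranked, new Article { Id = 2, Title = "deep dive" }, score: 42);

        // rank-based range, highest score first
        var top10 = await redis.GetRangeFromSortedSetDescAsync(ranked, 0, 9);

        // values together with their scores
        var withScores = await redis.GetAllWithScoresFromSortedSetAsync(ranked);
        foreach (var entry in withScores)
            Console.WriteLine($"{entry.Key.Title} => {entry.Value}");
    }
}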
ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetDescAsync(IRedisSortedSetAsync set, int fromRank, int toRank, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetDescAsync(set.Id, fromRank, toRank, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetAllWithScoresFromSortedSetAsync(IRedisSortedSetAsync set, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetAsync(set.Id, FirstElement, LastElement, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetAsync(IRedisSortedSetAsync set, int fromRank, int toRank, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetAsync(set.Id, fromRank, toRank, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetDescAsync(IRedisSortedSetAsync set, int fromRank, int toRank, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetDescAsync(set.Id, fromRank, toRank, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(set.Id, fromStringScore, toStringScore, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(set.Id, fromStringScore, toStringScore, skip, take, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(set.Id, fromScore, toScore, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(set.Id, fromScore, toScore, skip, take, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByLowestScoreAsync(set.Id, fromStringScore, toStringScore, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByLowestScoreAsync(set.Id, fromStringScore, toStringScore, skip, take, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByLowestScoreAsync(set.Id, fromScore, toScore, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, int? skip, int? 
take, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByLowestScoreAsync(set.Id, fromScore, toScore, skip, take, token)); + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(set.Id, fromStringScore, toStringScore, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(set.Id, fromStringScore, toStringScore, skip, take, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(set.Id, fromScore, toScore, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByHighestScoreAsync(set.Id, fromScore, toScore, skip, take, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByHighestScoreAsync(set.Id, fromStringScore, toStringScore, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByHighestScoreAsync(set.Id, fromStringScore, toStringScore, skip, take, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByHighestScoreAsync(set.Id, fromScore, toScore, token)); + + ValueTask> IRedisTypedClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, int? skip, int? 
take, CancellationToken token) + => CreateGenericMapAsync(AsyncClient.GetRangeWithScoresFromSortedSetByHighestScoreAsync(set.Id, fromScore, toScore, skip, take, token)); + + ValueTask IRedisTypedClientAsync.RemoveRangeFromSortedSetAsync(IRedisSortedSetAsync set, int minRank, int maxRank, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetAsync(set.Id, minRank, maxRank, token); + + ValueTask IRedisTypedClientAsync.RemoveRangeFromSortedSetByScoreAsync(IRedisSortedSetAsync set, double fromScore, double toScore, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetByScoreAsync(set.Id, fromScore, toScore, token); + + ValueTask IRedisTypedClientAsync.GetSortedSetCountAsync(IRedisSortedSetAsync set, CancellationToken token) + => AsyncClient.GetSortedSetCountAsync(set.Id, token); + + ValueTask IRedisTypedClientAsync.GetItemScoreInSortedSetAsync(IRedisSortedSetAsync set, T value, CancellationToken token) + => AsyncClient.GetItemScoreInSortedSetAsync(set.Id, value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, IRedisSortedSetAsync[] setIds, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), token); + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, IRedisSortedSetAsync[] setIds, string[] args, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), args, token); + + ValueTask IRedisTypedClientAsync.StoreUnionFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, IRedisSortedSetAsync[] setIds, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), token); + + ValueTask IRedisTypedClientAsync.StoreUnionFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, IRedisSortedSetAsync[] setIds, string[] args, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(intoSetId.Id, setIds.Map(x => x.Id).ToArray(), args, token); + + ValueTask IRedisTypedClientAsync.HashContainsEntryAsync(IRedisHashAsync hash, TKey key, CancellationToken token) + => AsyncClient.HashContainsEntryAsync(hash.Id, key.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.SetEntryInHashAsync(IRedisHashAsync hash, TKey key, T value, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(hash.Id, key.SerializeToString(), value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.SetEntryInHashIfNotExistsAsync(IRedisHashAsync hash, TKey key, T value, CancellationToken token) + => AsyncClient.SetEntryInHashIfNotExistsAsync(hash.Id, key.SerializeToString(), value.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.SetRangeInHashAsync(IRedisHashAsync hash, IEnumerable> keyValuePairs, CancellationToken token) + { + var stringKeyValuePairs = keyValuePairs.ToList().ConvertAll( + x => new KeyValuePair(x.Key.SerializeToString(), x.Value.SerializeToString())); + + return AsyncClient.SetRangeInHashAsync(hash.Id, stringKeyValuePairs, token); + } + + ValueTask IRedisTypedClientAsync.GetValueFromHashAsync(IRedisHashAsync hash, TKey key, CancellationToken token) + => DeserializeFromStringAsync(AsyncClient.GetValueFromHashAsync(hash.Id, key.SerializeToString(), token)); + + ValueTask IRedisTypedClientAsync.RemoveEntryFromHashAsync(IRedisHashAsync hash, TKey key, CancellationToken token) + => AsyncClient.RemoveEntryFromHashAsync(hash.Id, 
key.SerializeToString(), token); + + ValueTask IRedisTypedClientAsync.GetHashCountAsync(IRedisHashAsync hash, CancellationToken token) + => AsyncClient.GetHashCountAsync(hash.Id, token); + + ValueTask> IRedisTypedClientAsync.GetHashKeysAsync(IRedisHashAsync hash, CancellationToken token) + => AsyncClient.GetHashKeysAsync(hash.Id, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetHashValuesAsync(IRedisHashAsync hash, CancellationToken token) + => AsyncClient.GetHashValuesAsync(hash.Id, token).ConvertEachToAsync(); + + ValueTask> IRedisTypedClientAsync.GetAllEntriesFromHashAsync(IRedisHashAsync hash, CancellationToken token) + => ConvertEachToAsync(AsyncClient.GetAllEntriesFromHashAsync(hash.Id, token)); + + async ValueTask IRedisTypedClientAsync.StoreRelatedEntitiesAsync(object parentId, List children, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + var childKeys = children.ConvertAll(x => client.UrnKey(x)); + + await using var trans = await AsyncClient.CreateTransactionAsync(token).ConfigureAwait(false); + //Ugly but need access to a generic constraint-free StoreAll method + trans.QueueCommand(c => ((RedisClient)c).StoreAllAsyncImpl(children, token)); + trans.QueueCommand(c => c.AddRangeToSetAsync(childRefKey, childKeys, token)); + + await trans.CommitAsync(token).ConfigureAwait(false); + } + + ValueTask IRedisTypedClientAsync.StoreRelatedEntitiesAsync(object parentId, TChild[] children, CancellationToken token) + => AsAsync().StoreRelatedEntitiesAsync(parentId, new List(children), token); + + ValueTask IRedisTypedClientAsync.DeleteRelatedEntitiesAsync(object parentId, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + return new ValueTask(AsyncClient.RemoveAsync(childRefKey, token)); + } + + ValueTask IRedisTypedClientAsync.DeleteRelatedEntityAsync(object parentId, object childId, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + return AsyncClient.RemoveItemFromSetAsync(childRefKey, TypeSerializer.SerializeToString(childId), token); + } + + async ValueTask> IRedisTypedClientAsync.GetRelatedEntitiesAsync(object parentId, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + var childKeys = (await AsyncClient.GetAllItemsFromSetAsync(childRefKey, token).ConfigureAwait(false)).ToList(); + + return await AsyncClient.As().GetValuesAsync(childKeys, token).ConfigureAwait(false); + } + + ValueTask IRedisTypedClientAsync.GetRelatedEntitiesCountAsync(object parentId, CancellationToken token) + { + var childRefKey = GetChildReferenceSetKey(parentId); + return AsyncClient.GetSetCountAsync(childRefKey, token); + } + + ValueTask IRedisTypedClientAsync.AddToRecentsListAsync(T value, CancellationToken token) + { + var key = client.UrnKey(value); + var nowScore = DateTime.UtcNow.ToUnixTime(); + return AsyncClient.AddItemToSortedSetAsync(RecentSortedSetKey, key, nowScore, token).Await(); + } + + async ValueTask> IRedisTypedClientAsync.GetLatestFromRecentsListAsync(int skip, int take, CancellationToken token) + { + var toRank = take - 1; + var keys = await AsyncClient.GetRangeFromSortedSetDescAsync(RecentSortedSetKey, skip, toRank, token).ConfigureAwait(false); + var values = await AsAsync().GetValuesAsync(keys, token).ConfigureAwait(false); + return values; + } + + async ValueTask> IRedisTypedClientAsync.GetEarliestFromRecentsListAsync(int skip, int take, CancellationToken token) + { + var toRank = take - 1; + var keys = await 
AsyncClient.GetRangeFromSortedSetAsync(RecentSortedSetKey, skip, toRank, token).ConfigureAwait(false); + var values = await AsAsync().GetValuesAsync(keys, token).ConfigureAwait(false); + return values; + } + + ValueTask IRedisTypedClientAsync.RemoveEntryAsync(params string[] args) + => AsAsync().RemoveEntryAsync(args, token: default); + + ValueTask IRedisTypedClientAsync.RemoveEntryAsync(params IHasStringId[] entities) + => AsAsync().RemoveEntryAsync(entities, token: default); + + ValueTask> IRedisTypedClientAsync.GetIntersectFromSetsAsync(params IRedisSetAsync[] sets) + => AsAsync().GetIntersectFromSetsAsync(sets, token: default); + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSetsAsync(IRedisSetAsync intoSet, params IRedisSetAsync[] sets) + => AsAsync().StoreIntersectFromSetsAsync(intoSet, sets, token: default); + + ValueTask> IRedisTypedClientAsync.GetUnionFromSetsAsync(params IRedisSetAsync[] sets) + => AsAsync().GetUnionFromSetsAsync(sets, token: default); + + ValueTask IRedisTypedClientAsync.StoreUnionFromSetsAsync(IRedisSetAsync intoSet, params IRedisSetAsync[] sets) + => AsAsync().StoreUnionFromSetsAsync(intoSet, sets, token: default); + + ValueTask> IRedisTypedClientAsync.GetDifferencesFromSetAsync(IRedisSetAsync fromSet, params IRedisSetAsync[] withSets) + => AsAsync().GetDifferencesFromSetAsync(fromSet, withSets, token: default); + + ValueTask IRedisTypedClientAsync.StoreDifferencesFromSetAsync(IRedisSetAsync intoSet, IRedisSetAsync fromSet, params IRedisSetAsync[] withSets) + => AsAsync().StoreDifferencesFromSetAsync(intoSet, fromSet, withSets, token: default); + + ValueTask IRedisTypedClientAsync.StoreIntersectFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, params IRedisSortedSetAsync[] setIds) + => AsAsync().StoreIntersectFromSortedSetsAsync(intoSetId, setIds, token: default); + + ValueTask IRedisTypedClientAsync.StoreUnionFromSortedSetsAsync(IRedisSortedSetAsync intoSetId, params IRedisSortedSetAsync[] setIds) + => AsAsync().StoreUnionFromSortedSetsAsync(intoSetId, setIds, token: default); + + ValueTask IRedisTypedClientAsync.StoreRelatedEntitiesAsync(object parentId, params TChild[] children) + => AsAsync().StoreRelatedEntitiesAsync(parentId, children, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient.cs index 85c63704..cf070975 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
// @@ -36,15 +36,9 @@ static RedisTypedClient() readonly ITypeSerializer serializer = new JsonSerializer(); private readonly RedisClient client; - public IRedisClient RedisClient - { - get { return client; } - } + public IRedisClient RedisClient => client; - public IRedisNativeClient NativeClient - { - get { return client; } - } + public IRedisNativeClient NativeClient => client; /// /// Use this to share the same redis connection with another @@ -69,7 +63,7 @@ public RedisTypedClient(RedisClient client) public IRedisTypedTransaction CreateTransaction() { - return new RedisTypedTransaction(this); + return new RedisTypedTransaction(this, false); } public IRedisTypedPipeline CreatePipeline() @@ -88,26 +82,14 @@ public IDisposable AcquireLock(TimeSpan timeOut) public IRedisTransactionBase Transaction { - get - { - return client.Transaction; - } - set - { - client.Transaction = value; - } + get => client.Transaction; + set => client.Transaction = value; } public IRedisPipelineShared Pipeline { - get - { - return client.Pipeline; - } - set - { - client.Pipeline = value; - } + get => client.Pipeline; + set => client.Pipeline = value; } public void Watch(params string[] keys) @@ -155,18 +137,14 @@ public string UrnKey(T entity) return client.UrnKey(entity); } - public IRedisSet TypeIdsSet - { - get - { - return new RedisClientSet(client, client.GetTypeIdsSetKey()); - } - } + public IRedisSet TypeIdsSet => TypeIdsSetRaw; + + private RedisClientSet TypeIdsSetRaw => new RedisClientSet(client, client.GetTypeIdsSetKey()); public T this[string key] { - get { return GetValue(key); } - set { SetEntry(key, value); } + get => GetValue(key); + set => SetValue(key, value); } public byte[] SerializeValue(T value) @@ -184,33 +162,21 @@ public T DeserializeValue(byte[] value) public void SetValue(string key, T entity) { if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); client.Set(key, SerializeValue(entity)); client.RegisterTypeId(entity); } - [Obsolete("Use SetValue()")] - public void SetEntry(string key, T value) - { - SetValue(key, value); - } - public void SetValue(string key, T entity, TimeSpan expireIn) { if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); client.Set(key, SerializeValue(entity), expireIn); client.RegisterTypeId(entity); } - [Obsolete("Use SetValue()")] - public void SetEntry(string key, T value, TimeSpan expireIn) - { - SetValue(key, value, expireIn); - } - public bool SetValueIfNotExists(string key, T entity) { var success = client.SetNX(key, SerializeValue(entity)) == RedisNativeClient.Success; @@ -218,12 +184,6 @@ public bool SetValueIfNotExists(string key, T entity) return success; } - [Obsolete("Use SetValueIfNotExists()")] - public bool SetEntryIfNotExists(string key, T value) - { - return SetValueIfNotExists(key, value); - } - public bool SetValueIfExists(string key, T entity) { var success = client.Set(key, SerializeValue(entity), exists:true); @@ -260,7 +220,7 @@ public bool RemoveEntry(params IHasStringId[] entities) { var ids = entities.Map(x => x.Id); var success = client.Del(ids.ToArray()) == RedisNativeClient.Success; - if (success) client.RemoveTypeIds(ids.ToArray()); + if (success) client.RemoveTypeIdsByValues(ids.ToArray()); return success; } @@ -361,6 +321,10 @@ public void FlushAll() public T[] SearchKeys(string pattern) { var strKeys = client.SearchKeys(pattern); + return SearchKeysParse(strKeys); + } + private T[] SearchKeysParse(List strKeys) + { var 
keysCount = strKeys.Count; var keys = new T[keysCount]; @@ -376,7 +340,10 @@ public List GetValues(List keys) if (keys.IsNullOrEmpty()) return new List(); var resultBytesArray = client.MGet(keys.ToArray()); - + return ProcessGetValues(resultBytesArray); + } + private List ProcessGetValues(byte[][] resultBytesArray) + { var results = new List(); foreach (var resultBytes in resultBytesArray) { @@ -442,29 +409,41 @@ public T Store(T entity, TimeSpan expireIn) public void StoreAll(IEnumerable entities) { - if (entities == null) return; + if (PrepareStoreAll(entities, out var keys, out var values, out var entitiesList)) + { + client.MSet(keys, values); + client.RegisterTypeIds(entitiesList); + } + } + + private bool PrepareStoreAll(IEnumerable entities, out byte[][] keys, out byte[][] values, out List entitiesList) + { + if (entities == null) + { + keys = values = default; + entitiesList = default; + return false; + } - var entitiesList = entities.ToList(); + entitiesList = entities.ToList(); var len = entitiesList.Count; - var keys = new byte[len][]; - var values = new byte[len][]; + keys = new byte[len][]; + values = new byte[len][]; for (var i = 0; i < len; i++) { keys[i] = client.UrnKey(entitiesList[i]).ToUtf8Bytes(); values[i] = Redis.RedisClient.SerializeToUtf8Bytes(entitiesList[i]); } - - client.MSet(keys, values); - client.RegisterTypeIds(entitiesList); + return true; } public void Delete(T entity) { var urnKey = client.UrnKey(entity); this.RemoveEntry(urnKey); - client.RemoveTypeIds(entity); + client.RemoveTypeIdsByValue(entity); } public void DeleteById(object id) @@ -472,31 +451,42 @@ public void DeleteById(object id) var urnKey = client.UrnKey(id); this.RemoveEntry(urnKey); - client.RemoveTypeIds(id.ToString()); + client.RemoveTypeIdsById(id.ToString()); } public void DeleteByIds(IEnumerable ids) { if (ids == null) return; - var urnKeys = ids.Map(t => client.UrnKey(t)); - if (urnKeys.Count > 0) + var idStrings = ids.Map(x => x.ToString()).ToArray(); + var urnKeys = idStrings.Select(t => client.UrnKey(t)).ToArray(); + if (urnKeys.Length > 0) { - this.RemoveEntry(urnKeys.ToArray()); - client.RemoveTypeIds(ids.Map(x => x.ToString()).ToArray()); + this.RemoveEntry(urnKeys); + client.RemoveTypeIdsByIds(idStrings); } } - public void DeleteAll() + private void DeleteAll(ulong cursor, int pageSize) { - var ids = client.GetAllItemsFromSet(this.TypeIdsSetKey); - var urnKeys = ids.Map(t => client.UrnKey(t)); - if (urnKeys.Count > 0) + do { - - this.RemoveEntry(urnKeys.ToArray()); - this.RemoveEntry(this.TypeIdsSetKey); - } + var scanResult = client.SScan(this.TypeIdsSetKey, cursor, pageSize); + cursor = scanResult.Cursor; + var ids = scanResult.Results.Select(x => Encoding.UTF8.GetString(x)).ToList(); + var urnKeys = ids.Map(t => client.UrnKey(t)); + if (urnKeys.Count > 0) + { + this.RemoveEntry(urnKeys.ToArray()); + } + } while (cursor != 0); + + this.RemoveEntry(this.TypeIdsSetKey); + } + + public void DeleteAll() + { + DeleteAll(0,RedisConfig.CommandKeysBatchSize); } #endregion @@ -525,8 +515,5 @@ internal void EndPipeline() { client.EndPipeline(); } - - [Obsolete("Does nothing currently, RedisTypedClient will not be IDisposable in a future version")] - public void Dispose() { } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs index e3c0b3bf..e60432d0 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_Hash.cs @@ 
-5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_List.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_List.Async.cs new file mode 100644 index 00000000..023590f3 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_List.Async.cs @@ -0,0 +1,33 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Model; +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Generic +{ + public partial class RedisTypedClient + { + internal partial class RedisClientLists + : IHasNamed> + { + IRedisListAsync IHasNamed>.this[string listId] + { + get => new RedisClientList(client, listId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_List.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_List.cs index 56ed9592..144099e2 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient_List.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_List.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -13,6 +13,7 @@ using System; using System.Collections.Generic; using System.Linq; +using System.Threading; using ServiceStack.Model; namespace ServiceStack.Redis.Generic @@ -24,7 +25,7 @@ public partial class RedisTypedClient public IHasNamed> Lists { get; set; } - internal class RedisClientLists + internal partial class RedisClientLists : IHasNamed> { private readonly RedisTypedClient client; diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.Async.cs new file mode 100644 index 00000000..24889db6 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
+// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis.Generic +{ + public partial class RedisTypedClient + { + internal partial class RedisClientSets + : IHasNamed> + { + IRedisSetAsync IHasNamed>.this[string setId] + { + get => new RedisClientSet(client, setId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs index 15e607ef..d8a0510e 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_Set.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -27,7 +27,7 @@ public long Db set { client.Db = value; } } - internal class RedisClientSets + internal partial class RedisClientSets : IHasNamed> { private readonly RedisTypedClient client; diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.Async.cs new file mode 100644 index 00000000..ab4ede2e --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis.Generic +{ + public partial class RedisTypedClient + { + internal partial class RedisClientSortedSets + : IHasNamed> + { + IRedisSortedSetAsync IHasNamed>.this[string setId] + { + get => new RedisClientSortedSet(client, setId); + set => throw new NotSupportedException(); + } + } + } +} diff --git a/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs b/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs index 31113595..03155420 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedClient_SortedSet.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
// @@ -21,7 +21,7 @@ public partial class RedisTypedClient { public IHasNamed> SortedSets { get; set; } - internal class RedisClientSortedSets + internal partial class RedisClientSortedSets : IHasNamed> { private readonly RedisTypedClient client; diff --git a/src/ServiceStack.Redis/Generic/RedisTypedCommandQueue.cs b/src/ServiceStack.Redis/Generic/RedisTypedCommandQueue.cs index 0128b55a..00989112 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedCommandQueue.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedCommandQueue.cs @@ -273,7 +273,7 @@ public void QueueCommand(Func, HashSet> command, Ac BeginQueuedCommand(new QueuedRedisTypedCommand { MultiStringReturnCommand = r => command(r).ToList(), - OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToHashSet()), + OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToSet()), OnErrorCallback = onErrorCallback }); command(RedisClient); @@ -294,7 +294,7 @@ public void QueueCommand(Func, HashSet> command, Action< BeginQueuedCommand(new QueuedRedisTypedCommand { MultiObjectReturnCommand = r => command(r).ToList(), - OnSuccessMultiTypeCallback = x => onSuccessCallback(x.ConvertAll(y => JsonSerializer.DeserializeFromString(y)).ToHashSet()), + OnSuccessMultiTypeCallback = x => onSuccessCallback(x.ConvertAll(JsonSerializer.DeserializeFromString).ToSet()), OnErrorCallback = onErrorCallback }); command(RedisClient); diff --git a/src/ServiceStack.Redis/Generic/RedisTypedPipeline.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedPipeline.Async.cs new file mode 100644 index 00000000..2b571235 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedPipeline.Async.cs @@ -0,0 +1,265 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + /// + /// Pipeline for redis typed client + /// + /// + public partial class RedisTypedPipeline + : IRedisTypedPipelineAsync + { + private IRedisTypedPipelineAsync AsAsync() => this; + void IRedisQueueCompletableOperationAsync.CompleteBytesQueuedCommandAsync(Func> bytesReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(bytesReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteDoubleQueuedCommandAsync(Func> doubleReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(doubleReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteIntQueuedCommandAsync(Func> intReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(intReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteLongQueuedCommandAsync(Func> longReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(longReadCommand); + AddCurrentQueuedOperation(); + } + + void 
IRedisQueueCompletableOperationAsync.CompleteMultiBytesQueuedCommandAsync(Func> multiBytesReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(multiBytesReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteMultiStringQueuedCommandAsync(Func>> multiStringReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(multiStringReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteRedisDataQueuedCommandAsync(Func> redisDataReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(redisDataReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteStringQueuedCommandAsync(Func> stringReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(stringReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteVoidQueuedCommandAsync(Func voidReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(voidReadCommand); + AddCurrentQueuedOperation(); + } + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + async ValueTask IRedisPipelineSharedAsync.FlushAsync(CancellationToken token) + { + try + { + // flush send buffers + await RedisClient.FlushSendBufferAsync(token).ConfigureAwait(false); + RedisClient.ResetSendBuffer(); + + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + + } + finally + { + ClosePipeline(); + await RedisClient.AddTypeIdsRegisteredDuringPipelineAsync(token).ConfigureAwait(false); + } + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessVoidCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessIntCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessLongCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action 
onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessBoolCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessDoubleCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessBytesCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessTypeCallback = x => onSuccessCallback(JsonSerializer.DeserializeFromString(x)), + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessMultiStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToSet()), + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(async r => + { + var result = await command(r).ConfigureAwait(false); + return result.ToList(); + })); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + void IRedisTypedQueueableOperationAsync.QueueCommand(Func, ValueTask>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisTypedCommand + { + OnSuccessMultiTypeCallback = x => onSuccessCallback(x.ConvertAll(JsonSerializer.DeserializeFromString)), + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + RedisAllPurposePipeline.AssertSync(command(RedisClient)); + } + + async ValueTask IRedisPipelineSharedAsync.ReplayAsync(CancellationToken token) + { + RedisClient.Pipeline = this; + // execute + foreach (var queuedCommand in QueuedCommands) + { + if (queuedCommand is QueuedRedisTypedCommand cmd) + await cmd.ExecuteAsync(RedisClient).ConfigureAwait(false); + } + await AsAsync().FlushAsync(token).ConfigureAwait(false); + return true; + } + } +} \ No newline at end of file 
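For illustration, a minimal sketch of how the async typed pipeline implemented above might be driven end to end. It assumes the typed client comes from IRedisClientAsync.As<T>(), that the success-callback and CancellationToken parameters have defaults, and that a GetValueAsync counterpart exists alongside the SetValueAsync shown earlier; the Customer type and key are made up for the example and are not taken from this changeset.

using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

public class Customer { public string Id { get; set; } public string Name { get; set; } }

public static class TypedPipelineSketch
{
    public static async Task RunAsync(IRedisClientAsync redisAsync)
    {
        var redis = redisAsync.As<Customer>();              // assumed: typed async client

        await using var pipeline = redis.CreatePipeline();  // IRedisTypedPipelineAsync<Customer>

        Customer loaded = null;
        // commands are only queued here; nothing is written to the connection yet
        pipeline.QueueCommand(r => r.SetValueAsync("urn:customer:1", new Customer { Id = "1", Name = "Acme" }));
        pipeline.QueueCommand(r => r.GetValueAsync("urn:customer:1"), c => loaded = c);

        // FlushAsync sends the buffered commands and reads each queued reply in order
        await pipeline.FlushAsync();

        Console.WriteLine(loaded?.Name);
    }
}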
diff --git a/src/ServiceStack.Redis/Generic/RedisTypedPipeline.cs b/src/ServiceStack.Redis/Generic/RedisTypedPipeline.cs index d6ad7ccc..86e42af1 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedPipeline.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedPipeline.cs @@ -7,7 +7,7 @@ namespace ServiceStack.Redis /// Pipeline for redis typed client /// /// - public class RedisTypedPipeline : RedisTypedCommandQueue, IRedisTypedPipeline + public partial class RedisTypedPipeline : RedisTypedCommandQueue, IRedisTypedPipeline { internal RedisTypedPipeline(RedisTypedClient redisClient) : base(redisClient) @@ -53,13 +53,12 @@ protected void Execute() { foreach (var queuedCommand in QueuedCommands) { - var cmd = queuedCommand as QueuedRedisTypedCommand; - if (cmd != null) + if (queuedCommand is QueuedRedisTypedCommand cmd) cmd.Execute(RedisClient); } } - public bool Replay() + public virtual bool Replay() { RedisClient.Pipeline = this; Execute(); @@ -72,7 +71,7 @@ protected void ClosePipeline() RedisClient.EndPipeline(); } - public void Dispose() + public virtual void Dispose() { ClosePipeline(); } diff --git a/src/ServiceStack.Redis/Generic/RedisTypedTransaction.Async.cs b/src/ServiceStack.Redis/Generic/RedisTypedTransaction.Async.cs new file mode 100644 index 00000000..33375094 --- /dev/null +++ b/src/ServiceStack.Redis/Generic/RedisTypedTransaction.Async.cs @@ -0,0 +1,94 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Redis.Pipeline; + +namespace ServiceStack.Redis.Generic +{ + /// + /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). 
+ /// + internal partial class RedisTypedTransaction + : IRedisTypedTransactionAsync, IRedisTransactionBaseAsync + { + async ValueTask IRedisTypedTransactionAsync.CommitAsync(CancellationToken token) + { + bool rc = true; + try + { + _numCommands = QueuedCommands.Count / 2; + + //insert multi command at beginning + QueuedCommands.Insert(0, new QueuedRedisCommand() + { + }.WithAsyncReturnCommand(VoidReturnCommandAsync: r => { Init(); return default; }) + .WithAsyncReadCommand(RedisClient.ExpectOkAsync)); + + //the first half of the responses will be "QUEUED", + // so insert reading of multiline after these responses + QueuedCommands.Insert(_numCommands + 1, new QueuedRedisOperation() + { + OnSuccessIntCallback = handleMultiDataResultCount + }.WithAsyncReadCommand(RedisClient.ReadMultiDataResultCountAsync)); + + // add Exec command at end (not queued) + QueuedCommands.Add(new RedisCommand() + { + }.WithAsyncReturnCommand(r => ExecAsync(token))); + + //execute transaction + await ExecAsync(token).ConfigureAwait(false); + + ///////////////////////////// + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + } + catch (RedisTransactionFailedException) + { + rc = false; + } + finally + { + RedisClient.Transaction = null; + ClosePipeline(); + await RedisClient.AddTypeIdsRegisteredDuringPipelineAsync(token).ConfigureAwait(false); + } + return rc; + } + + private ValueTask ExecAsync(CancellationToken token) + { + RedisClient.Exec(); + return RedisClient.FlushSendBufferAsync(token); + } + + ValueTask IRedisTypedTransactionAsync.RollbackAsync(CancellationToken token) + { + Rollback(); // no async bits needed + return default; + } + + partial void QueueExpectQueuedAsync() + { + QueuedCommands.Insert(0, new QueuedRedisOperation + { + }.WithAsyncReadCommand(RedisClient.ExpectQueuedAsync)); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Generic/RedisTypedTransaction.cs b/src/ServiceStack.Redis/Generic/RedisTypedTransaction.cs index c3c95a81..989ef1f4 100644 --- a/src/ServiceStack.Redis/Generic/RedisTypedTransaction.cs +++ b/src/ServiceStack.Redis/Generic/RedisTypedTransaction.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -19,14 +19,17 @@ namespace ServiceStack.Redis.Generic /// /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). 
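The index arithmetic in CommitAsync above is easier to follow with a concrete layout. QueueExpectQueuedAsync has already inserted one QUEUED-read at the front of the queue per user command, so QueuedCommands.Count is twice the number of user commands. An illustrative trace for a transaction with two queued commands; the comments describe the order in which replies are consumed, not actual API calls:

    // _numCommands = QueuedCommands.Count / 2 = 2
    //
    // [0] MULTI                  -> ExpectOkAsync                  reads "+OK"
    // [1] QUEUED check           -> ExpectQueuedAsync              reads "+QUEUED"
    // [2] QUEUED check           -> ExpectQueuedAsync              reads "+QUEUED"
    // [3] EXEC reply header      -> ReadMultiDataResultCountAsync  reads "*2"
    //                               (handleMultiDataResultCount verifies it equals _numCommands)
    // [4] user command #1        -> its own read command, then its success callback
    // [5] user command #2        -> its own read command, then its success callback
    // [6] EXEC command (appended last) -> consumes no reply of its own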
/// - internal class RedisTypedTransaction + internal partial class RedisTypedTransaction : RedisTypedPipeline, IRedisTypedTransaction, IRedisTransactionBase { private int _numCommands = 0; - internal RedisTypedTransaction(RedisTypedClient redisClient) + private readonly bool _isAsync; + internal RedisTypedTransaction(RedisTypedClient redisClient, bool isAsync) : base(redisClient) { - + // if someone casts between sync/async: the sync-over-async or + // async-over-sync is entirely self-inflicted; I can't fix stupid + _isAsync = isAsync; } protected override void Init() @@ -59,7 +62,6 @@ private void Exec() { RedisClient.Exec(); RedisClient.FlushSendBuffer(); - } public bool Commit() @@ -76,7 +78,6 @@ public bool Commit() VoidReadCommand = RedisClient.ExpectOk, }); - //the first half of the responses will be "QUEUED", // so insert reading of multiline after these responses QueuedCommands.Insert(_numCommands + 1, new QueuedRedisOperation() @@ -120,10 +121,9 @@ public bool Commit() private void handleMultiDataResultCount(int count) { if (count != _numCommands) - throw new InvalidOperationException(string.Format( - "Invalid results received from 'EXEC', expected '{0}' received '{1}'" - + "\nWarning: Transaction was committed", - _numCommands, count)); + throw new InvalidOperationException( + $"Invalid results received from 'EXEC', expected '{_numCommands}' received '{count}'" + + "\nWarning: Transaction was committed"); } public void Rollback() @@ -135,7 +135,7 @@ public void Rollback() RedisClient.ClearTypeIdsRegisteredDuringPipeline(); } - public bool Replay() + public override bool Replay() { bool rc = true; try @@ -163,7 +163,7 @@ public bool Replay() return rc; } - public void Dispose() + public override void Dispose() { base.Dispose(); if (RedisClient.Transaction == null) return; @@ -175,8 +175,16 @@ public void Dispose() protected override void AddCurrentQueuedOperation() { base.AddCurrentQueuedOperation(); - QueueExpectQueued(); + if (_isAsync) + { + QueueExpectQueuedAsync(); + } + else + { + QueueExpectQueued(); + } } #endregion + partial void QueueExpectQueuedAsync(); } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.Async.cs b/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.Async.cs new file mode 100644 index 00000000..fcadd82b --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.Async.cs @@ -0,0 +1,51 @@ +using System; +using System.Threading.Tasks; +using System.Collections.Generic; +using ServiceStack.Redis.Internal; + +namespace ServiceStack.Redis.Pipeline +{ + /// + /// A complete redis command, with method to send command, receive response, and run callback on success or failure + /// + internal partial class QueuedRedisCommand : RedisCommand + { + public override ValueTask ExecuteAsync(IRedisClientAsync client) + { + try + { + switch (AsyncReturnCommand) + { + case null: + ExecuteThrowIfSync(); + return default; + case Func VoidReturnCommandAsync: + return VoidReturnCommandAsync(client); + case Func> IntReturnCommandAsync: + return IntReturnCommandAsync(client).Await(); + case Func> LongReturnCommandAsync: + return LongReturnCommandAsync(client).Await(); + case Func> DoubleReturnCommandAsync: + return DoubleReturnCommandAsync(client).Await(); + case Func> BytesReturnCommandAsync: + return BytesReturnCommandAsync(client).Await(); + case Func> StringReturnCommandAsync: + return StringReturnCommandAsync(client).Await(); + case Func> MultiBytesReturnCommandAsync: + return 
MultiBytesReturnCommandAsync(client).Await(); + case Func>> MultiStringReturnCommandAsync: + return MultiStringReturnCommandAsync(client).Await(); + case object obj: + ExecuteThrowIfSync(); + // Execute only processes a limited number of patterns; we'll respect that here too + throw new InvalidOperationException("Command cannot be executed in this context: " + obj.GetType().FullName); + } + } + catch (Exception ex) + { + Log.Error(ex); + throw; + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.cs b/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.cs index 1acb90ec..238c9521 100644 --- a/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.cs +++ b/src/ServiceStack.Redis/Pipeline/QueuedRedisCommand.cs @@ -6,7 +6,7 @@ namespace ServiceStack.Redis.Pipeline /// /// A complete redis command, with method to send command, receive response, and run callback on success or failure /// - internal class QueuedRedisCommand : RedisCommand + internal partial class QueuedRedisCommand : RedisCommand { public override void Execute(IRedisClient client) { @@ -50,7 +50,10 @@ public override void Execute(IRedisClient client) else if (MultiStringReturnCommand != null) { MultiStringReturnCommand(client); - + } + else + { + ExecuteThrowIfAsync(); } } catch (Exception ex) @@ -58,7 +61,6 @@ public override void Execute(IRedisClient client) Log.Error(ex); throw; } - } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.Async.cs b/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.Async.cs new file mode 100644 index 00000000..e1779605 --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.Async.cs @@ -0,0 +1,159 @@ +using System; +using System.Collections.Generic; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Pipeline +{ + internal partial class QueuedRedisOperation + { + public virtual ValueTask ExecuteAsync(IRedisClientAsync client) => default; + + private Delegate _asyncReadCommand; + private QueuedRedisOperation SetAsyncReadCommand(Delegate value) + { + if (_asyncReadCommand is object && _asyncReadCommand != value) + throw new InvalidOperationException("Only a single async read command can be assigned"); + _asyncReadCommand = value; + return this; + } + + internal QueuedRedisOperation WithAsyncReadCommand(Func VoidReadCommandAsync) + => SetAsyncReadCommand(VoidReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> IntReadCommandAsync) + => SetAsyncReadCommand(IntReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> LongReadCommandAsync) + => SetAsyncReadCommand(LongReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> BoolReadCommandAsync) + => SetAsyncReadCommand(BoolReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> BytesReadCommandAsync) + => SetAsyncReadCommand(BytesReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> MultiBytesReadCommandAsync) + => SetAsyncReadCommand(MultiBytesReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> StringReadCommandAsync) + => SetAsyncReadCommand(StringReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func>> MultiStringReadCommandAsync) + => SetAsyncReadCommand(MultiStringReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func>> DictionaryStringReadCommandAsync) + => 
SetAsyncReadCommand(DictionaryStringReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> DoubleReadCommandAsync) + => SetAsyncReadCommand(DoubleReadCommandAsync); + internal QueuedRedisOperation WithAsyncReadCommand(Func> RedisDataReadCommandAsync) + => SetAsyncReadCommand(RedisDataReadCommandAsync); + + public async ValueTask ProcessResultAsync(CancellationToken token) + { + try + { + switch (_asyncReadCommand) + { + case null: + ProcessResultThrowIfSync(); + break; + case Func VoidReadCommandAsync: + await VoidReadCommandAsync(token).ConfigureAwait(false); + OnSuccessVoidCallback?.Invoke(); + break; + case Func> IntReadCommandAsync: + var i32 = await IntReadCommandAsync(token).ConfigureAwait(false); + OnSuccessIntCallback?.Invoke(i32); + OnSuccessLongCallback?.Invoke(i32); + OnSuccessBoolCallback?.Invoke(i32 == RedisNativeClient.Success); + OnSuccessVoidCallback?.Invoke(); + break; + case Func> LongReadCommandAsync: + var i64 = await LongReadCommandAsync(token).ConfigureAwait(false); + OnSuccessIntCallback?.Invoke((int)i64); + OnSuccessLongCallback?.Invoke(i64); + OnSuccessBoolCallback?.Invoke(i64 == RedisNativeClient.Success); + OnSuccessVoidCallback?.Invoke(); + break; + case Func> DoubleReadCommandAsync: + var f64 = await DoubleReadCommandAsync(token).ConfigureAwait(false); + OnSuccessDoubleCallback?.Invoke(f64); + break; + case Func> BytesReadCommandAsync: + var bytes = await BytesReadCommandAsync(token).ConfigureAwait(false); + if (bytes != null && bytes.Length == 0) bytes = null; + OnSuccessBytesCallback?.Invoke(bytes); + OnSuccessStringCallback?.Invoke(bytes != null ? Encoding.UTF8.GetString(bytes) : null); + OnSuccessTypeCallback?.Invoke(bytes != null ? Encoding.UTF8.GetString(bytes) : null); + OnSuccessIntCallback?.Invoke(bytes != null ? 
int.Parse(Encoding.UTF8.GetString(bytes)) : 0); + OnSuccessBoolCallback?.Invoke(bytes != null && Encoding.UTF8.GetString(bytes) == "OK"); + break; + case Func> StringReadCommandAsync: + var s = await StringReadCommandAsync(token).ConfigureAwait(false); + OnSuccessStringCallback?.Invoke(s); + OnSuccessTypeCallback?.Invoke(s); + break; + case Func> MultiBytesReadCommandAsync: + var multiBytes = await MultiBytesReadCommandAsync(token).ConfigureAwait(false); + OnSuccessMultiBytesCallback?.Invoke(multiBytes); + OnSuccessMultiStringCallback?.Invoke(multiBytes?.ToStringList()); + OnSuccessMultiTypeCallback?.Invoke(multiBytes.ToStringList()); + OnSuccessDictionaryStringCallback?.Invoke(multiBytes.ToStringDictionary()); + break; + case Func>> MultiStringReadCommandAsync: + var multiString = await MultiStringReadCommandAsync(token).ConfigureAwait(false); + OnSuccessMultiStringCallback?.Invoke(multiString); + break; + case Func> RedisDataReadCommandAsync: + var data = await RedisDataReadCommandAsync(token).ConfigureAwait(false); + OnSuccessRedisTextCallback?.Invoke(data.ToRedisText()); + OnSuccessRedisDataCallback?.Invoke(data); + break; + case Func> BoolReadCommandAsync: + var b = await BoolReadCommandAsync(token).ConfigureAwait(false); + OnSuccessBoolCallback?.Invoke(b); + break; + case Func>> DictionaryStringReadCommandAsync: + var dict = await DictionaryStringReadCommandAsync(token).ConfigureAwait(false); + OnSuccessDictionaryStringCallback?.Invoke(dict); + break; + default: + ProcessResultThrowIfSync(); + break; + } + } + catch (Exception ex) + { + Log.Error(ex); + + if (OnErrorCallback != null) + { + OnErrorCallback(ex); + } + else + { + throw; + } + } + } + + partial void OnProcessResultThrowIfAsync() + { + if (_asyncReadCommand is object) + { + throw new InvalidOperationException("An async read command was present, but the queued operation is being processed synchronously"); + } + } + private void ProcessResultThrowIfSync() + { + if (VoidReadCommand is object + || IntReadCommand is object + || LongReadCommand is object + || BoolReadCommand is object + || BytesReadCommand is object + || MultiBytesReadCommand is object + || StringReadCommand is object + || MultiBytesReadCommand is object + || DictionaryStringReadCommand is object + || DoubleReadCommand is object + || RedisDataReadCommand is object) + { + throw new InvalidOperationException("A sync read command was present, but the queued operation is being processed asynchronously"); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.cs b/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.cs index 40edd949..3a45d25b 100644 --- a/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.cs +++ b/src/ServiceStack.Redis/Pipeline/QueuedRedisOperation.cs @@ -5,7 +5,7 @@ namespace ServiceStack.Redis.Pipeline { - internal class QueuedRedisOperation + internal partial class QueuedRedisOperation { protected static readonly ILog Log = LogManager.GetLogger(typeof(QueuedRedisOperation)); @@ -51,60 +51,28 @@ public void ProcessResult() if (VoidReadCommand != null) { VoidReadCommand(); - if (OnSuccessVoidCallback != null) - { - OnSuccessVoidCallback(); - } + OnSuccessVoidCallback?.Invoke(); } else if (IntReadCommand != null) { var result = IntReadCommand(); - if (OnSuccessIntCallback != null) - { - OnSuccessIntCallback(result); - } - if (OnSuccessLongCallback != null) - { - OnSuccessLongCallback(result); - } - if (OnSuccessBoolCallback != null) - { - var success = result == RedisNativeClient.Success; - 
OnSuccessBoolCallback(success); - } - if (OnSuccessVoidCallback != null) - { - OnSuccessVoidCallback(); - } + OnSuccessIntCallback?.Invoke(result); + OnSuccessLongCallback?.Invoke(result); + OnSuccessBoolCallback?.Invoke(result == RedisNativeClient.Success); + OnSuccessVoidCallback?.Invoke(); } else if (LongReadCommand != null) { var result = LongReadCommand(); - if (OnSuccessIntCallback != null) - { - OnSuccessIntCallback((int)result); - } - if (OnSuccessLongCallback != null) - { - OnSuccessLongCallback(result); - } - if (OnSuccessBoolCallback != null) - { - var success = result == RedisNativeClient.Success; - OnSuccessBoolCallback(success); - } - if (OnSuccessVoidCallback != null) - { - OnSuccessVoidCallback(); - } + OnSuccessIntCallback?.Invoke((int)result); + OnSuccessLongCallback?.Invoke(result); + OnSuccessBoolCallback?.Invoke(result == RedisNativeClient.Success); + OnSuccessVoidCallback?.Invoke(); } else if (DoubleReadCommand != null) { var result = DoubleReadCommand(); - if (OnSuccessDoubleCallback != null) - { - OnSuccessDoubleCallback(result); - } + OnSuccessDoubleCallback?.Invoke(result); } else if (BytesReadCommand != null) { @@ -112,74 +80,50 @@ public void ProcessResult() if (result != null && result.Length == 0) result = null; - if (OnSuccessBytesCallback != null) - { - OnSuccessBytesCallback(result); - } - if (OnSuccessStringCallback != null) - { - OnSuccessStringCallback(result != null ? Encoding.UTF8.GetString(result) : null); - } - if (OnSuccessTypeCallback != null) - { - OnSuccessTypeCallback(result != null ? Encoding.UTF8.GetString(result) : null); - } - if (OnSuccessIntCallback != null) - { - OnSuccessIntCallback(result != null ? int.Parse(Encoding.UTF8.GetString(result)) : 0); - } + OnSuccessBytesCallback?.Invoke(result); + OnSuccessStringCallback?.Invoke(result != null ? Encoding.UTF8.GetString(result) : null); + OnSuccessTypeCallback?.Invoke(result != null ? Encoding.UTF8.GetString(result) : null); + OnSuccessIntCallback?.Invoke(result != null ? int.Parse(Encoding.UTF8.GetString(result)) : 0); + OnSuccessBoolCallback?.Invoke(result != null && Encoding.UTF8.GetString(result) == "OK"); } else if (StringReadCommand != null) { var result = StringReadCommand(); - if (OnSuccessStringCallback != null) - { - OnSuccessStringCallback(result); - } - if (OnSuccessTypeCallback != null) - { - OnSuccessTypeCallback(result); - } + OnSuccessStringCallback?.Invoke(result); + OnSuccessTypeCallback?.Invoke(result); } else if (MultiBytesReadCommand != null) { var result = MultiBytesReadCommand(); - if (OnSuccessMultiBytesCallback != null) - { - OnSuccessMultiBytesCallback(result); - } - if (OnSuccessMultiStringCallback != null) - { - OnSuccessMultiStringCallback(result != null ? result.ToStringList() : null); - } - if (OnSuccessMultiTypeCallback != null) - { - OnSuccessMultiTypeCallback(result.ToStringList()); - } - if (OnSuccessDictionaryStringCallback != null) - { - OnSuccessDictionaryStringCallback(result.ToStringDictionary()); - } + OnSuccessMultiBytesCallback?.Invoke(result); + OnSuccessMultiStringCallback?.Invoke(result != null ? 
result.ToStringList() : null); + OnSuccessMultiTypeCallback?.Invoke(result.ToStringList()); + OnSuccessDictionaryStringCallback?.Invoke(result.ToStringDictionary()); } else if (MultiStringReadCommand != null) { var result = MultiStringReadCommand(); - if (OnSuccessMultiStringCallback != null) - { - OnSuccessMultiStringCallback(result); - } + OnSuccessMultiStringCallback?.Invoke(result); } else if (RedisDataReadCommand != null) { var data = RedisDataReadCommand(); - if (OnSuccessRedisTextCallback != null) - { - OnSuccessRedisTextCallback(data.ToRedisText()); - } - if (OnSuccessRedisDataCallback != null) - { - OnSuccessRedisDataCallback(data); - } + OnSuccessRedisTextCallback?.Invoke(data.ToRedisText()); + OnSuccessRedisDataCallback?.Invoke(data); + } + else if (BoolReadCommand != null) + { + var result = BoolReadCommand(); + OnSuccessBoolCallback?.Invoke(result); + } + else if (DictionaryStringReadCommand != null) + { + var result = DictionaryStringReadCommand(); + OnSuccessDictionaryStringCallback?.Invoke(result); + } + else + { + ProcessResultThrowIfAsync(); } } catch (Exception ex) @@ -197,5 +141,7 @@ public void ProcessResult() } } + protected void ProcessResultThrowIfAsync() => OnProcessResultThrowIfAsync(); + partial void OnProcessResultThrowIfAsync(); } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.Async.cs b/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.Async.cs new file mode 100644 index 00000000..992bb480 --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.Async.cs @@ -0,0 +1,330 @@ +using ServiceStack.Redis.Pipeline; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + + public partial class RedisAllPurposePipeline : IRedisPipelineAsync + { + private IRedisPipelineAsync AsAsync() => this; + + private protected virtual async ValueTask ReplayAsync(CancellationToken token) + { + Init(); + await ExecuteAsync().ConfigureAwait(false); + await AsAsync().FlushAsync(token).ConfigureAwait(false); + return true; + } + + protected async ValueTask ExecuteAsync() + { + int count = QueuedCommands.Count; + for (int i = 0; i < count; ++i) + { + var op = QueuedCommands[0]; + QueuedCommands.RemoveAt(0); + await op.ExecuteAsync(RedisClient).ConfigureAwait(false); + QueuedCommands.Add(op); + } + } + + ValueTask IRedisPipelineSharedAsync.ReplayAsync(CancellationToken token) + => ReplayAsync(token); + + async ValueTask IRedisPipelineSharedAsync.FlushAsync(CancellationToken token) + { + // flush send buffers + await RedisClient.FlushSendBufferAsync(token).ConfigureAwait(false); + RedisClient.ResetSendBuffer(); + + try + { + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + } + catch (Exception) + { + // The connection cannot be reused anymore. All queued commands have been sent to redis. Even if a new command is executed, the next response read from the + // network stream can be the response of one of the queued commands, depending on when the exception occurred. This response would be invalid for the new command. 
+ RedisClient.DisposeConnection(); + throw; + } + + ClosePipeline(); + } + + private protected virtual ValueTask DisposeAsync() + { + // don't need to send anything; just clean up + Dispose(); + return default; + } + + ValueTask IAsyncDisposable.DisposeAsync() => DisposeAsync(); + + internal static void AssertSync(ValueTask command) + { + if (!command.IsCompleted) + { + _ = ObserveAsync(command.AsTask()); + throw new InvalidOperationException($"The operations provided to {nameof(IRedisQueueableOperationAsync.QueueCommand)} should not perform asynchronous operations internally"); + } + // this serves two purposes: 1) surface any fault, and + // 2) ensure that if pooled (IValueTaskSource), it is reclaimed + _ = command.Result; + } + + internal static void AssertSync(ValueTask command) + { + if (!command.IsCompleted) + { + _ = ObserveAsync(command.AsTask()); + throw new InvalidOperationException($"The operations provided to {nameof(IRedisQueueableOperationAsync.QueueCommand)} should not perform asynchronous operations internally"); + } + // this serves two purposes: 1) surface any fault, and + // 2) ensure that if pooled (IValueTaskSource), it is reclaimed + command.GetAwaiter().GetResult(); + } + + static async Task ObserveAsync(Task task) // semantically this is "async void", but: some sync-contexts explode on that + { + // we've already thrown an exception via AssertSync; this + // just ensures that an "unobserved exception" doesn't fire + // as well + try { await task.ConfigureAwait(false); } + catch { } + } + + void IRedisQueueableOperationAsync.QueueCommand(Func command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessVoidCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessIntCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessLongCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessBoolCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessDoubleCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessBytesCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void 
IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessMultiBytesCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessMultiStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToSet()), + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(async r => + { + var result = await command(r).ConfigureAwait(false); + return result.ToList(); + })); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func>> command, Action> onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessDictionaryStringCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessRedisDataCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueableOperationAsync.QueueCommand(Func> command, Action onSuccessCallback, Action onErrorCallback) + { + BeginQueuedCommand(new QueuedRedisCommand + { + OnSuccessRedisTextCallback = onSuccessCallback, + OnErrorCallback = onErrorCallback + }.WithAsyncReturnCommand(command)); + AssertSync(command(RedisClient)); + } + + void IRedisQueueCompletableOperationAsync.CompleteMultiBytesQueuedCommandAsync(Func> multiBytesReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(multiBytesReadCommand); + AddCurrentQueuedOperation(); + } + + + void IRedisQueueCompletableOperationAsync.CompleteLongQueuedCommandAsync(Func> longReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(longReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteBytesQueuedCommandAsync(Func> bytesReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(bytesReadCommand); + AddCurrentQueuedOperation(); + } + + void 
IRedisQueueCompletableOperationAsync.CompleteVoidQueuedCommandAsync(Func voidReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(voidReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteStringQueuedCommandAsync(Func> stringReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(stringReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteDoubleQueuedCommandAsync(Func> doubleReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(doubleReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteIntQueuedCommandAsync(Func> intReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(intReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteMultiStringQueuedCommandAsync(Func>> multiStringReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(multiStringReadCommand); + AddCurrentQueuedOperation(); + } + + void IRedisQueueCompletableOperationAsync.CompleteRedisDataQueuedCommandAsync(Func> redisDataReadCommand) + { + //AssertCurrentOperation(); + // this can happen when replaying pipeline/transaction + if (CurrentQueuedOperation == null) return; + + CurrentQueuedOperation.WithAsyncReadCommand(redisDataReadCommand); + AddCurrentQueuedOperation(); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.cs b/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.cs index e861ccc8..19b8fd1b 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisAllPurposePipeline.cs @@ -1,10 +1,10 @@ -using System; using ServiceStack.Redis.Pipeline; +using System; namespace ServiceStack.Redis { - public class RedisAllPurposePipeline : RedisCommandQueue, IRedisPipeline + public partial class RedisAllPurposePipeline : RedisCommandQueue, IRedisPipeline { /// /// General purpose pipeline @@ -35,14 +35,24 @@ public void Flush() { // flush send buffers RedisClient.FlushAndResetSendBuffer(); - - //receive expected results - foreach (var queuedCommand in QueuedCommands) + + try { - queuedCommand.ProcessResult(); + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + queuedCommand.ProcessResult(); + } } + catch (Exception) + { + // The connection cannot be reused anymore. All queued commands have been sent to redis. Even if a new command is executed, the next response read from the + // network stream can be the response of one of the queued commands, depending on when the exception occurred. This response would be invalid for the new command. 
+ RedisClient.DisposeConnection(); + throw; + } + ClosePipeline(); - } protected void Execute() @@ -57,7 +67,7 @@ protected void Execute() } } - public bool Replay() + public virtual bool Replay() { Init(); Execute(); @@ -70,7 +80,7 @@ protected void ClosePipeline() RedisClient.EndPipeline(); } - public void Dispose() + public virtual void Dispose() { ClosePipeline(); } diff --git a/src/ServiceStack.Redis/Pipeline/RedisCommand.Async.cs b/src/ServiceStack.Redis/Pipeline/RedisCommand.Async.cs new file mode 100644 index 00000000..0485af6f --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/RedisCommand.Async.cs @@ -0,0 +1,118 @@ +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + /// + /// Redis command that does not get queued + /// + internal partial class RedisCommand + { + private Delegate _asyncReturnCommand; + protected Delegate AsyncReturnCommand => _asyncReturnCommand; + private RedisCommand SetAsyncReturnCommand(Delegate value) + { + if (_asyncReturnCommand is object && _asyncReturnCommand != value) + throw new InvalidOperationException("Only a single async return command can be assigned"); + _asyncReturnCommand = value; + return this; + } + internal RedisCommand WithAsyncReturnCommand(Func VoidReturnCommandAsync) + => SetAsyncReturnCommand(VoidReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> IntReturnCommandAsync) + => SetAsyncReturnCommand(IntReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> LongReturnCommandAsync) + => SetAsyncReturnCommand(LongReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> BoolReturnCommandAsync) + => SetAsyncReturnCommand(BoolReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> BytesReturnCommandAsync) + => SetAsyncReturnCommand(BytesReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> MultiBytesReturnCommandAsync) + => SetAsyncReturnCommand(MultiBytesReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> StringReturnCommandAsync) + => SetAsyncReturnCommand(StringReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func>> MultiStringReturnCommandAsync) + => SetAsyncReturnCommand(MultiStringReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func>> DictionaryStringReturnCommandAsync) + => SetAsyncReturnCommand(DictionaryStringReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> RedisDataReturnCommandAsync) + => SetAsyncReturnCommand(RedisDataReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> RedisTextReturnCommandAsync) + => SetAsyncReturnCommand(RedisTextReturnCommandAsync); + internal RedisCommand WithAsyncReturnCommand(Func> DoubleReturnCommandAsync) + => SetAsyncReturnCommand(DoubleReturnCommandAsync); + + public override ValueTask ExecuteAsync(IRedisClientAsync client) + { + try + { + switch (_asyncReturnCommand) + { + case null: + ExecuteThrowIfSync(); + return default; + case Func VoidReturnCommandAsync: + return VoidReturnCommandAsync(client); + case Func> IntReturnCommandAsync: + return IntReturnCommandAsync(client).Await(); + case Func> LongReturnCommandAsync: + return LongReturnCommandAsync(client).Await(); + case Func> DoubleReturnCommandAsync: + return DoubleReturnCommandAsync(client).Await(); + case Func> BytesReturnCommandAsync: + return BytesReturnCommandAsync(client).Await(); + case Func> StringReturnCommandAsync: + return 
StringReturnCommandAsync(client).Await(); + case Func> MultiBytesReturnCommandAsync: + return MultiBytesReturnCommandAsync(client).Await(); + case Func>> MultiStringReturnCommandAsync: + return MultiStringReturnCommandAsync(client).Await(); + case Func>> DictionaryStringReturnCommandAsync: + return DictionaryStringReturnCommandAsync(client).Await(); + case Func> RedisDataReturnCommandAsync: + return RedisDataReturnCommandAsync(client).Await(); + case Func> RedisTextReturnCommandAsync: + return RedisTextReturnCommandAsync(client).Await(); + case Func> BoolReturnCommandAsync: + return BoolReturnCommandAsync(client).Await(); + case object obj: + ExecuteThrowIfSync(); + return default; + } + } + catch (Exception ex) + { + Log.Error(ex); + return default; // RedisCommand.Execute swallows here; we'll do the same + } + } + + partial void OnExecuteThrowIfAsync() + { + if (_asyncReturnCommand is object) + { + throw new InvalidOperationException("An async return command was present, but the queued operation is being processed synchronously"); + } + } + protected void ExecuteThrowIfSync() + { + if (VoidReturnCommand is object + || IntReturnCommand is object + || LongReturnCommand is object + || BoolReturnCommand is object + || BytesReturnCommand is object + || MultiBytesReturnCommand is object + || StringReturnCommand is object + || MultiStringReturnCommand is object + || DictionaryStringReturnCommand is object + || RedisDataReturnCommand is object + || RedisTextReturnCommand is object + || DoubleReturnCommand is object) + { + throw new InvalidOperationException("A sync return command was present, but the queued operation is being processed asynchronously"); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisCommand.cs b/src/ServiceStack.Redis/Pipeline/RedisCommand.cs index ab64a2c9..abe09a35 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisCommand.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisCommand.cs @@ -9,7 +9,7 @@ namespace ServiceStack.Redis /// /// Redis command that does not get queued /// - internal class RedisCommand : QueuedRedisOperation + internal partial class RedisCommand : QueuedRedisOperation { public Action VoidReturnCommand { get; set; } public Func IntReturnCommand { get; set; } @@ -79,11 +79,22 @@ public override void Execute(IRedisClient client) { RedisTextReturnCommand(client); } + else if (BoolReturnCommand != null) + { + BoolReturnCommand(client); + } + else + { + ExecuteThrowIfAsync(); + } } catch (Exception ex) { Log.Error(ex); } } + + protected void ExecuteThrowIfAsync() => OnExecuteThrowIfAsync(); + partial void OnExecuteThrowIfAsync(); } } diff --git a/src/ServiceStack.Redis/Pipeline/RedisCommandQueue.cs b/src/ServiceStack.Redis/Pipeline/RedisCommandQueue.cs index 0a5f3abf..e21d42ab 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisCommandQueue.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisCommandQueue.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
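The ExecuteAsync dispatch in RedisCommand.Async.cs (and in the queued variant earlier) collapses ValueTask<T>-returning commands to a plain ValueTask with the internal Await() helper before handing them back to the pipeline. A minimal sketch of what such a helper can look like; this is an assumption about ServiceStack.Redis.Internal for illustration, not its actual source:

    using System.Threading.Tasks;

    internal static class ValueTaskExtensions
    {
        // Discard the result of a ValueTask<T>, yielding a non-generic ValueTask that
        // completes synchronously whenever the source already has.
        public static ValueTask Await<T>(this ValueTask<T> pending)
        {
            if (pending.IsCompletedSuccessfully)
            {
                _ = pending.Result;   // consume (and, for pooled sources, reclaim) the value
                return default;
            }
            return Awaited(pending);

            static async ValueTask Awaited(ValueTask<T> incomplete)
            {
                await incomplete.ConfigureAwait(false);
            }
        }
    }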
// @@ -242,7 +242,7 @@ public virtual void QueueCommand(Func> command, Ac BeginQueuedCommand(new QueuedRedisCommand { MultiStringReturnCommand = r => command(r).ToList(), - OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToHashSet()), + OnSuccessMultiStringCallback = list => onSuccessCallback(list.ToSet()), OnErrorCallback = onErrorCallback }); command(RedisClient); diff --git a/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.Async.cs b/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.Async.cs new file mode 100644 index 00000000..bea7be2f --- /dev/null +++ b/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.Async.cs @@ -0,0 +1,32 @@ +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Pipeline +{ + partial class RedisPipelineCommand + { + internal async ValueTask> ReadAllAsIntsAsync(CancellationToken token) + { + var results = new List(); + while (cmdCount-- > 0) + { + results.Add(await client.ReadLongAsync(token).ConfigureAwait(false)); + } + + return results; + } + internal async ValueTask ReadAllAsIntsHaveSuccessAsync(CancellationToken token) + { + var allResults = await ReadAllAsIntsAsync(token).ConfigureAwait(false); + return allResults.All(x => x == RedisNativeClient.Success); + } + + internal ValueTask FlushAsync(CancellationToken token) + { + Flush(); + return default; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.cs b/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.cs index 6c2830dd..67b4cb69 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisPipelineCommand.cs @@ -3,7 +3,7 @@ namespace ServiceStack.Redis.Pipeline { - public class RedisPipelineCommand + public partial class RedisPipelineCommand { private readonly RedisNativeClient client; private int cmdCount; diff --git a/src/ServiceStack.Redis/Pipeline/RedisQueueCompletableOperation.cs b/src/ServiceStack.Redis/Pipeline/RedisQueueCompletableOperation.cs index 8a399cac..92b10529 100644 --- a/src/ServiceStack.Redis/Pipeline/RedisQueueCompletableOperation.cs +++ b/src/ServiceStack.Redis/Pipeline/RedisQueueCompletableOperation.cs @@ -7,7 +7,7 @@ namespace ServiceStack.Redis /// /// Redis operation (transaction/pipeline) that allows queued commands to be completed /// - public class RedisQueueCompletableOperation + public partial class RedisQueueCompletableOperation { internal readonly List QueuedCommands = new List(); diff --git a/src/ServiceStack.Redis/PooledRedisClientManager.Async.cs b/src/ServiceStack.Redis/PooledRedisClientManager.Async.cs new file mode 100644 index 00000000..79734a35 --- /dev/null +++ b/src/ServiceStack.Redis/PooledRedisClientManager.Async.cs @@ -0,0 +1,283 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
+// + +using ServiceStack.Caching; +using ServiceStack.Redis.Internal; +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.AsyncEx; + +namespace ServiceStack.Redis +{ + public partial class PooledRedisClientManager + : IRedisClientsManagerAsync + { + /// + /// Use previous client resolving behavior + /// + public static bool UseGetClientBlocking = false; + + ValueTask IRedisClientsManagerAsync.GetCacheClientAsync(CancellationToken token) + => new RedisClientManagerCacheClient(this).AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetClientAsync(CancellationToken token) => UseGetClientBlocking + ? GetClientBlocking().AsValueTaskResult() + : GetClientAsync(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyCacheClientAsync(CancellationToken token) + => new RedisClientManagerCacheClient(this) { ReadOnly = true }.AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyClientAsync(CancellationToken token) => UseGetClientBlocking + ? GetReadOnlyClientBlocking().AsValueTaskResult() + : GetReadOnlyClientAsync(); + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + private AsyncManualResetEvent readAsyncEvent; + partial void PulseAllReadAsync() + { + readAsyncEvent?.Set(); + readAsyncEvent?.Reset(); + } + + private AsyncManualResetEvent writeAsyncEvent; + partial void PulseAllWriteAsync() + { + writeAsyncEvent?.Set(); + writeAsyncEvent?.Reset(); + } + + private async Task WaitForWriter(int msTimeout) + { + // If we're not doing async, no need to create this till we need it. + writeAsyncEvent ??= new AsyncManualResetEvent(false); + var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(msTimeout)); + try + { + await writeAsyncEvent.WaitAsync(cts.Token); + } + catch (OperationCanceledException) { return false; } + return true; + } + + private async ValueTask GetClientAsync() + { + try + { + var inactivePoolIndex = -1; + do + { + lock (writeClients) + { + AssertValidReadWritePool(); + + // If it's -1, then we want to try again after a delay of some kind. So if it's NOT negative one, process it... + if ((inactivePoolIndex = GetInActiveWriteClient(out var inActiveClient)) != -1) + { + //inActiveClient != null only for Valid InActive Clients + if (inActiveClient != null) + { + WritePoolIndex++; + inActiveClient.Activate(); + + InitClient(inActiveClient); + + return inActiveClient; + } + else + { + // Still need to be in lock for this! + break; + } + } + } + + if (PoolTimeout.HasValue) + { + // We have a timeout value set - so try to not wait longer than this. + if (!await WaitForWriter(PoolTimeout.Value)) + { + throw new TimeoutException(PoolTimeoutError); + } + } + else + { + // Wait forever, so just retry till we get one. + await WaitForWriter(RecheckPoolAfterMs); + } + } while (true); // Just keep repeating until we get a slot. + + //Reaches here when there's no Valid InActive Clients, but we have a slot for one! 
+ try + { + //inactivePoolIndex = index of reservedSlot || index of invalid client + var existingClient = writeClients[inactivePoolIndex]; + if (existingClient != null && existingClient != reservedSlot && existingClient.HadExceptions) + { + RedisState.DeactivateClient(existingClient); + } + + var newClient = InitNewClient(RedisResolver.CreateMasterClient(inactivePoolIndex)); + + //Put all blocking I/O or potential Exceptions before lock + lock (writeClients) + { + //If existingClient at inactivePoolIndex changed (failover) return new client outside of pool + if (writeClients[inactivePoolIndex] != existingClient) + { + if (Log.IsDebugEnabled) + Log.Debug("writeClients[inactivePoolIndex] != existingClient: {0}".Fmt(writeClients[inactivePoolIndex])); + + return newClient; //return client outside of pool + } + + WritePoolIndex++; + writeClients[inactivePoolIndex] = newClient; + + return !AssertAccessOnlyOnSameThread + ? newClient + : newClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); + } + } + catch + { + //Revert free-slot for any I/O exceptions that can throw (before lock) + lock (writeClients) + { + writeClients[inactivePoolIndex] = null; //free slot + } + throw; + } + } + finally + { + RedisState.DisposeExpiredClients(); + } + } + + private async Task WaitForReader(int msTimeout) + { + // If we're not doing async, no need to create this till we need it. + readAsyncEvent ??= new AsyncManualResetEvent(false); + var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(msTimeout)); + try + { + await readAsyncEvent.WaitAsync(cts.Token); + } + catch (OperationCanceledException) { return false; } + return true; + } + + private async ValueTask GetReadOnlyClientAsync() + { + try + { + var inactivePoolIndex = -1; + do + { + lock (readClients) + { + AssertValidReadOnlyPool(); + + // If it's -1, then we want to try again after a delay of some kind. So if it's NOT negative one, process it... + if ((inactivePoolIndex = GetInActiveReadClient(out var inActiveClient)) != -1) + { + //inActiveClient != null only for Valid InActive Clients + if (inActiveClient != null) + { + ReadPoolIndex++; + inActiveClient.Activate(); + + InitClient(inActiveClient); + + return inActiveClient; + } + else + { + // Still need to be in lock for this! + break; + } + } + } + + if (PoolTimeout.HasValue) + { + // We have a timeout value set - so try to not wait longer than this. + if (!await WaitForReader(PoolTimeout.Value)) + { + throw new TimeoutException(PoolTimeoutError); + } + } + else + { + // Wait forever, so just retry till we get one. + await WaitForReader(RecheckPoolAfterMs); + } + } while (true); // Just keep repeating until we get a slot. 
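Both acquisition loops (GetClientAsync above and GetReadOnlyClientAsync here) replace Monitor.Wait with an awaitable event plus a cancellation-based timeout, so no thread is blocked while the pool is exhausted; setting PooledRedisClientManager.UseGetClientBlocking = true reverts to the previous blocking resolution. A minimal usage sketch of the async manager surface; the connection string and timeout are illustrative values only:

    IRedisClientsManagerAsync manager = new PooledRedisClientManager("localhost:6379")
    {
        PoolTimeout = 3000   // in ms; if the pool stays exhausted this long, GetClientAsync throws TimeoutException
    };

    // Acquires a pooled connection without blocking a thread while waiting for a free slot.
    await using var redis = await manager.GetClientAsync();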
+ + //Reaches here when there's no Valid InActive Clients + try + { + //inactivePoolIndex = index of reservedSlot || index of invalid client + var existingClient = readClients[inactivePoolIndex]; + if (existingClient != null && existingClient != reservedSlot && existingClient.HadExceptions) + { + RedisState.DeactivateClient(existingClient); + } + + var newClient = InitNewClient(RedisResolver.CreateSlaveClient(inactivePoolIndex)); + + //Put all blocking I/O or potential Exceptions before lock + lock (readClients) + { + //If existingClient at inactivePoolIndex changed (failover) return new client outside of pool + if (readClients[inactivePoolIndex] != existingClient) + { + if (Log.IsDebugEnabled) + Log.Debug("readClients[inactivePoolIndex] != existingClient: {0}".Fmt(readClients[inactivePoolIndex])); + + Interlocked.Increment(ref RedisState.TotalClientsCreatedOutsidePool); + + //Don't handle callbacks for new client outside pool + newClient.ClientManager = null; + return newClient; //return client outside of pool + } + + ReadPoolIndex++; + readClients[inactivePoolIndex] = newClient; + return newClient; + } + } + catch + { + //Revert free-slot for any I/O exceptions that can throw + lock (readClients) + { + readClients[inactivePoolIndex] = null; //free slot + } + throw; + } + } + finally + { + RedisState.DisposeExpiredClients(); + } + } + + } + +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/PooledRedisClientManager.cs b/src/ServiceStack.Redis/PooledRedisClientManager.cs index f04c73c3..481db23a 100644 --- a/src/ServiceStack.Redis/PooledRedisClientManager.cs +++ b/src/ServiceStack.Redis/PooledRedisClientManager.cs @@ -5,14 +5,13 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // using System; using System.Collections.Generic; -using System.IO; using System.Linq; using System.Threading; using ServiceStack.Caching; @@ -23,8 +22,8 @@ namespace ServiceStack.Redis { /// /// Provides thread-safe pooling of redis client connections. - /// Allows load-balancing of master-write and read-slave hosts, ideal for - /// 1 master and multiple replicated read slaves. + /// Allows load-balancing of master-write and read-replica hosts, ideal for + /// 1 master and multiple replicated read replicas. /// public partial class PooledRedisClientManager : IRedisClientsManager, IRedisFailover, IHandleClientDispose, IHasRedisResolver @@ -34,14 +33,15 @@ public partial class PooledRedisClientManager private const string PoolTimeoutError = "Redis Timeout expired. The timeout period elapsed prior to obtaining a connection from the pool. This may have occurred because all pooled connections were in use."; - protected readonly int PoolSizeMultiplier = 10; + protected readonly int PoolSizeMultiplier = 20; public int RecheckPoolAfterMs = 100; public int? PoolTimeout { get; set; } public int? ConnectTimeout { get; set; } public int? SocketSendTimeout { get; set; } public int? SocketReceiveTimeout { get; set; } public int? IdleTimeOutSecs { get; set; } - + public bool AssertAccessOnlyOnSameThread { get; set; } + /// /// Gets or sets object key prefix. 
/// @@ -103,8 +103,8 @@ public PooledRedisClientManager( public PooledRedisClientManager( IEnumerable readWriteHosts, IEnumerable readOnlyHosts, - long initalDb) - : this(readWriteHosts, readOnlyHosts, null, initalDb, null, null) + long initialDb) + : this(readWriteHosts, readOnlyHosts, null, initialDb, null, null) { } @@ -112,25 +112,25 @@ public PooledRedisClientManager( IEnumerable readWriteHosts, IEnumerable readOnlyHosts, RedisClientManagerConfig config, - long? initalDb, + long? initialDb, int? poolSizeMultiplier, int? poolTimeOutSeconds) { this.Db = config != null - ? config.DefaultDb ?? initalDb - : initalDb; + ? config.DefaultDb ?? initialDb + : initialDb; var masters = (readWriteHosts ?? TypeConstants.EmptyStringArray).ToArray(); - var slaves = (readOnlyHosts ?? TypeConstants.EmptyStringArray).ToArray(); + var replicas = (readOnlyHosts ?? TypeConstants.EmptyStringArray).ToArray(); - RedisResolver = new RedisResolver(masters, slaves); + RedisResolver = new RedisResolver(masters, replicas); - this.PoolSizeMultiplier = poolSizeMultiplier ?? 10; + this.PoolSizeMultiplier = poolSizeMultiplier ?? RedisConfig.DefaultPoolSizeMultiplier; this.Config = config ?? new RedisClientManagerConfig { MaxWritePoolSize = RedisConfig.DefaultMaxPoolSize ?? masters.Length * PoolSizeMultiplier, - MaxReadPoolSize = RedisConfig.DefaultMaxPoolSize ?? slaves.Length * PoolSizeMultiplier, + MaxReadPoolSize = RedisConfig.DefaultMaxPoolSize ?? replicas.Length * PoolSizeMultiplier, }; this.OnFailover = new List>(); @@ -140,6 +140,8 @@ public PooledRedisClientManager( ? poolTimeOutSeconds * 1000 : 2000; //Default Timeout + this.AssertAccessOnlyOnSameThread = RedisConfig.AssertAccessOnlyOnSameThread; + JsConfig.InitStatics(); if (this.Config.AutoStart) @@ -157,6 +159,11 @@ public void FailoverTo(IEnumerable readWriteHosts, IEnumerable r { Interlocked.Increment(ref RedisState.TotalFailovers); + var masters = readWriteHosts.ToList(); + var replicas = readOnlyHosts.ToList(); + + Log.Info($"FailoverTo: {string.Join(",", masters)} : {string.Join(",", replicas)} Total: {RedisState.TotalFailovers}"); + lock (readClients) { for (var i = 0; i < readClients.Length; i++) @@ -167,7 +174,7 @@ public void FailoverTo(IEnumerable readWriteHosts, IEnumerable r readClients[i] = null; } - RedisResolver.ResetSlaves(readOnlyHosts); + RedisResolver.ResetSlaves(replicas); } lock (writeClients) @@ -180,7 +187,7 @@ public void FailoverTo(IEnumerable readWriteHosts, IEnumerable r writeClients[i] = null; } - RedisResolver.ResetMasters(readWriteHosts); + RedisResolver.ResetMasters(masters); } if (this.OnFailover != null) @@ -208,7 +215,9 @@ protected virtual void OnStart() /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts /// /// - public IRedisClient GetClient() + public IRedisClient GetClient() => GetClientBlocking(); + + private RedisClient GetClientBlocking() { try { @@ -240,11 +249,13 @@ public IRedisClient GetClient() if (inActiveClient != null) { WritePoolIndex++; - inActiveClient.Active = true; + inActiveClient.Activate(); InitClient(inActiveClient); - return inActiveClient; + return (!AssertAccessOnlyOnSameThread) + ? inActiveClient + : inActiveClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); } } @@ -277,7 +288,10 @@ public IRedisClient GetClient() WritePoolIndex++; writeClients[inactivePoolIndex] = newClient; - return newClient; + + return (!AssertAccessOnlyOnSameThread) + ? 
newClient + : newClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); } } catch @@ -315,7 +329,7 @@ public override void Dispose() {} private int GetInActiveWriteClient(out RedisClient inactiveClient) { //this will loop through all hosts in readClients once even though there are 2 for loops - //both loops are used to try to get the prefered host according to the round robin algorithm + //both loops are used to try to get the preferred host according to the round robin algorithm var readWriteTotal = RedisResolver.ReadWriteHostsCount; var desiredIndex = WritePoolIndex % writeClients.Length; for (int x = 0; x < readWriteTotal; x++) @@ -351,7 +365,9 @@ private int GetInActiveWriteClient(out RedisClient inactiveClient) /// Returns a ReadOnly client using the hosts defined in ReadOnlyHosts. /// /// - public virtual IRedisClient GetReadOnlyClient() + public virtual IRedisClient GetReadOnlyClient() => GetReadOnlyClientBlocking(); + + private RedisClient GetReadOnlyClientBlocking() { try { @@ -383,7 +399,7 @@ public virtual IRedisClient GetReadOnlyClient() if (inActiveClient != null) { ReadPoolIndex++; - inActiveClient.Active = true; + inActiveClient.Activate(); InitClient(inActiveClient); @@ -451,7 +467,7 @@ private int GetInActiveReadClient(out RedisClient inactiveClient) { var desiredIndex = ReadPoolIndex % readClients.Length; //this will loop through all hosts in readClients once even though there are 2 for loops - //both loops are used to try to get the prefered host according to the round robin algorithm + //both loops are used to try to get the preferred host according to the round robin algorithm var readOnlyTotal = RedisResolver.ReadOnlyHostsCount; for (int x = 0; x < readOnlyTotal; x++) { @@ -485,7 +501,7 @@ private int GetInActiveReadClient(out RedisClient inactiveClient) private RedisClient InitNewClient(RedisClient client) { client.Id = Interlocked.Increment(ref RedisClientCounter); - client.Active = true; + client.Activate(newClient:true); client.ClientManager = this; client.ConnectionFilter = ConnectionFilter; if (NamespacePrefix != null) @@ -511,6 +527,20 @@ private RedisClient InitClient(RedisClient client) return client; } + partial void PulseAllReadAsync(); + private void PulseAllRead() + { + PulseAllReadAsync(); + Monitor.PulseAll(readClients); + } + + partial void PulseAllWriteAsync(); + private void PulseAllWrite() + { + PulseAllWriteAsync(); + Monitor.PulseAll(writeClients); + } + public void DisposeClient(RedisNativeClient client) { lock (readClients) @@ -525,10 +555,11 @@ public void DisposeClient(RedisNativeClient client) } else { - client.Active = false; + client.TrackThread = null; + client.Deactivate(); } - Monitor.PulseAll(readClients); + PulseAllRead(); return; } } @@ -545,19 +576,25 @@ public void DisposeClient(RedisNativeClient client) } else { - client.Active = false; + client.TrackThread = null; + client.Deactivate(); } - Monitor.PulseAll(writeClients); + PulseAllWrite(); return; } } //Client not found in any pool, pulse both pools. 
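Releasing a client back to the pool now wakes both kinds of waiters: Monitor.PulseAll for the blocking GetClient path and the async reset events (via the PulseAllReadAsync/PulseAllWriteAsync partials) for the awaiting path. The other addition in this area, AssertAccessOnlyOnSameThread with LimitAccessToThread, records which thread checked a client out so accidental cross-thread sharing can be diagnosed. A small sketch of how that switch is intended to be used; the exact failure reported on cross-thread access is an implementation detail not shown here:

    // Opt in globally before creating the manager; pooled clients then capture the
    // acquiring thread id and stack trace when they are handed out.
    RedisConfig.AssertAccessOnlyOnSameThread = true;

    using var manager = new PooledRedisClientManager("localhost:6379");
    using var redis = manager.GetClient();   // records ManagedThreadId + Environment.StackTrace

    // Using `redis` from a different thread can now be detected and reported, which helps
    // track down a pooled connection being shared between threads by mistake.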
lock (readClients) - Monitor.PulseAll(readClients); + { + PulseAllRead(); + } + lock (writeClients) - Monitor.PulseAll(writeClients); + { + PulseAllWrite(); + } } /// @@ -568,8 +605,8 @@ public void DisposeReadOnlyClient(RedisNativeClient client) { lock (readClients) { - client.Active = false; - Monitor.PulseAll(readClients); + client.Deactivate(); + PulseAllRead(); } } @@ -581,8 +618,8 @@ public void DisposeWriteClient(RedisNativeClient client) { lock (writeClients) { - client.Active = false; - Monitor.PulseAll(writeClients); + client.Deactivate(); + PulseAllWrite(); } } @@ -646,7 +683,7 @@ public Dictionary GetStats() var ret = new Dictionary { - {"VersionString", "" + Text.Env.VersionString}, + {"VersionString", "" + Env.VersionString}, {"writeClientsPoolSize", "" + writeClientsPoolSize}, {"writeClientsCreated", "" + writeClientsCreated}, @@ -760,9 +797,7 @@ protected void Dispose(RedisClient redisClient) } catch (Exception ex) { - Log.Error(string.Format( - "Error when trying to dispose of RedisClient to host {0}:{1}", - redisClient.Host, redisClient.Port), ex); + Log.Error($"Error when trying to dispose of RedisClient to host {redisClient.Host}:{redisClient.Port}", ex); } } @@ -802,7 +837,7 @@ public DisposablePooledClient(PooledRedisClientManager clientManager) /// /// access the wrapped client /// - public T Client { get { return client; } } + public T Client => client; /// /// release the wrapped client back to the pool diff --git a/src/ServiceStack.Redis/Properties/AssemblyInfo.cs b/src/ServiceStack.Redis/Properties/AssemblyInfo.cs index cd61416d..4ce9daba 100644 --- a/src/ServiceStack.Redis/Properties/AssemblyInfo.cs +++ b/src/ServiceStack.Redis/Properties/AssemblyInfo.cs @@ -1,36 +1,5 @@ -using System.Reflection; -using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -// General Information about an assembly is controlled through the following -// set of attributes. Change these attribute values to modify the information -// associated with an assembly. -[assembly: AssemblyTitle("ServiceStack.Redis")] -[assembly: AssemblyDescription("")] -[assembly: AssemblyConfiguration("")] -[assembly: AssemblyCompany("")] -[assembly: AssemblyProduct("ServiceStack.Redis")] -[assembly: AssemblyCopyright("Copyright © ServiceStack 2013")] -[assembly: AssemblyTrademark("")] -[assembly: AssemblyCulture("")] - -// Setting ComVisible to false makes the types in this assembly not visible -// to COM components. If you need to access a type in this assembly from -// COM, set the ComVisible attribute to true on that type. 
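-// Note: the hand-written assembly metadata above (title, company, copyright, file version) is
-// removed here; with SDK-style projects that information is presumably generated from the .csproj,
-// so only ComVisible, the COM type-library Guid and the pinned AssemblyVersion ("6.0.0.0") remain.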
[assembly: ComVisible(false)] - -// The following GUID is for the ID of the typelib if this project is exposed to COM [assembly: Guid("70a33fa7-9f81-418d-bb25-6a4be6648ae4")] - -// Version information for an assembly consists of the following four values: -// -// Major Version -// Minor Version -// Build Number -// Revision -// -// You can specify all the values or you can default the Build and Revision Numbers -// by using the '*' as shown below: -// [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("4.0.0.0")] -//[assembly: AssemblyFileVersion("4.0.0.0")] +[assembly: System.Reflection.AssemblyVersion("6.0.0.0")] \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient.Async.cs b/src/ServiceStack.Redis/RedisClient.Async.cs new file mode 100644 index 00000000..6648a224 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient.Async.cs @@ -0,0 +1,1558 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis/ +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Caching; +using ServiceStack.Data; +using ServiceStack.Model; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Internal; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Text; +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisClient : IRedisClientAsync, IRemoveByPatternAsync, ICacheClientAsync, IAsyncDisposable + { + /// + /// Access this instance for async usage + /// + public IRedisClientAsync AsAsync() => this; + + // the typed client implements this for us + IRedisTypedClientAsync IRedisClientAsync.As() => (IRedisTypedClientAsync)As(); + + // convenience since we're not saturating the public API; this makes it easy to call + // the explicit interface implementations; the JIT should make this a direct call + private IRedisNativeClientAsync NativeAsync => this; + + IHasNamed IRedisClientAsync.Lists => Lists as IHasNamed ?? throw new NotSupportedException($"The provided Lists ({Lists?.GetType().FullName}) does not support IRedisListAsync"); + IHasNamed IRedisClientAsync.Sets => Sets as IHasNamed ?? throw new NotSupportedException($"The provided Sets ({Sets?.GetType().FullName})does not support IRedisSetAsync"); + IHasNamed IRedisClientAsync.SortedSets => SortedSets as IHasNamed ?? throw new NotSupportedException($"The provided SortedSets ({SortedSets?.GetType().FullName})does not support IRedisSortedSetAsync"); + IHasNamed IRedisClientAsync.Hashes => Hashes as IHasNamed ?? 
throw new NotSupportedException($"The provided Hashes ({Hashes?.GetType().FullName})does not support IRedisHashAsync"); + + internal ValueTask RegisterTypeIdAsync(T value, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + var id = value.GetId().ToString(); + + return RegisterTypeIdAsync(typeIdsSetKey, id, token); + } + internal ValueTask RegisterTypeIdAsync(string typeIdsSetKey, string id, CancellationToken token) + { + if (this.Pipeline != null) + { + var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); + registeredTypeIdsWithinPipeline.Add(id); + return default; + } + else + { + return AsAsync().AddItemToSetAsync(typeIdsSetKey, id, token); + } + } + + // Called just after original Pipeline is closed. + internal async ValueTask AddTypeIdsRegisteredDuringPipelineAsync(CancellationToken token) + { + foreach (var entry in registeredTypeIdsWithinPipelineMap) + { + await AsAsync().AddRangeToSetAsync(entry.Key, entry.Value.ToList(), token).ConfigureAwait(false); + } + registeredTypeIdsWithinPipelineMap = new Dictionary>(); + } + + + ValueTask IRedisClientAsync.GetServerTimeAsync(CancellationToken token) + => NativeAsync.TimeAsync(token).Await(parts => ParseTimeResult(parts)); + + IRedisPipelineAsync IRedisClientAsync.CreatePipeline() + => new RedisAllPurposePipeline(this); + + ValueTask IRedisClientAsync.CreateTransactionAsync(CancellationToken token) + { + AssertServerVersionNumber(); // pre-fetch call to INFO before transaction if needed + return new RedisTransaction(this, true).AsValueTaskResult(); // note that the MULTI here will be held and flushed async + } + + ValueTask IRedisClientAsync.RemoveEntryAsync(string[] keys, CancellationToken token) + => keys.Length == 0 ? default : NativeAsync.DelAsync(keys, token).IsSuccessAsync(); + + private async ValueTask ExecAsync(Func action) + { + using (JsConfig.With(new Text.Config { ExcludeTypeInfo = false })) + { + await action(this).ConfigureAwait(false); + } + } + + private async ValueTask ExecAsync(Func> action) + { + using (JsConfig.With(new Text.Config { ExcludeTypeInfo = false })) + { + var ret = await action(this).ConfigureAwait(false); + return ret; + } + } + + ValueTask IRedisClientAsync.SetValueAsync(string key, string value, CancellationToken token) + { + var bytesValue = value?.ToUtf8Bytes(); + return NativeAsync.SetAsync(key, bytesValue, token: token); + } + + ValueTask IRedisClientAsync.GetValueAsync(string key, CancellationToken token) + => NativeAsync.GetAsync(key, token).FromUtf8BytesAsync(); + + Task ICacheClientAsync.GetAsync(string key, CancellationToken token) + { + return ExecAsync(async r => { + if (typeof(T) == typeof(byte[])) + { + var ret = await ((IRedisNativeClientAsync) r).GetAsync(key, token).ConfigureAwait(false); + return (T) (object) ret; + } + else + { + var val = await r.GetValueAsync(key, token).ConfigureAwait(false); + var ret = JsonSerializer.DeserializeFromString(val); + return ret; + } + }).AsTask(); + } + + async ValueTask> IRedisClientAsync.SearchKeysAsync(string pattern, CancellationToken token) + { + var list = new List(); + await foreach (var value in ((IRedisClientAsync)this).ScanAllKeysAsync(pattern, token: token).WithCancellation(token).ConfigureAwait(false)) + { + list.Add(value); + } + return list; + } + + async IAsyncEnumerable IRedisClientAsync.ScanAllKeysAsync(string pattern, int pageSize, [EnumeratorCancellation] CancellationToken token) + { + ScanResult ret = default; + while (true) + { + ret = await (pattern != null // note 
ConfigureAwait is handled below + ? NativeAsync.ScanAsync(ret?.Cursor ?? 0, pageSize, match: pattern, token: token) + : NativeAsync.ScanAsync(ret?.Cursor ?? 0, pageSize, token: token) + ).ConfigureAwait(false); + + foreach (var key in ret.Results) + { + yield return key.FromUtf8Bytes(); + } + + if (ret.Cursor == 0) break; + } + } + + ValueTask IRedisClientAsync.GetEntryTypeAsync(string key, CancellationToken token) + => NativeAsync.TypeAsync(key, token).Await((val, state) => state.ParseEntryType(val), this); + + ValueTask IRedisClientAsync.AddItemToSetAsync(string setId, string item, CancellationToken token) + => NativeAsync.SAddAsync(setId, item.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.AddItemToListAsync(string listId, string value, CancellationToken token) + => NativeAsync.RPushAsync(listId, value.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.AddItemToSortedSetAsync(string setId, string value, CancellationToken token) + => ((IRedisClientAsync)this).AddItemToSortedSetAsync(setId, value, GetLexicalScore(value), token); + + ValueTask IRedisClientAsync.AddItemToSortedSetAsync(string setId, string value, double score, CancellationToken token) + => NativeAsync.ZAddAsync(setId, score, value.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.SetEntryInHashAsync(string hashId, string key, string value, CancellationToken token) + => NativeAsync.HSetAsync(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.SetAllAsync(IDictionary map, CancellationToken token) + => GetSetAllBytes(map, out var keyBytes, out var valBytes) ? NativeAsync.MSetAsync(keyBytes, valBytes, token) : default; + + ValueTask IRedisClientAsync.SetAllAsync(IEnumerable keys, IEnumerable values, CancellationToken token) + => GetSetAllBytes(keys, values, out var keyBytes, out var valBytes) ? NativeAsync.MSetAsync(keyBytes, valBytes, token) : default; + + Task ICacheClientAsync.SetAllAsync(IDictionary values, CancellationToken token) + { + if (values.Count != 0) + { + return ExecAsync(r => + { + // need to do this inside Exec for the JSON config bits + GetSetAllBytesTyped(values, out var keys, out var valBytes); + return ((IRedisNativeClientAsync)r).MSetAsync(keys, valBytes, token); + }).AsTask(); + } + else + { + return Task.CompletedTask; + } + } + + ValueTask IRedisClientAsync.RenameKeyAsync(string fromName, string toName, CancellationToken token) + => NativeAsync.RenameAsync(fromName, toName, token); + + ValueTask IRedisClientAsync.ContainsKeyAsync(string key, CancellationToken token) + => NativeAsync.ExistsAsync(key, token).IsSuccessAsync(); + + + ValueTask IRedisClientAsync.GetRandomKeyAsync(CancellationToken token) + => NativeAsync.RandomKeyAsync(token); + + ValueTask IRedisClientAsync.SelectAsync(long db, CancellationToken token) + => NativeAsync.SelectAsync(db, token); + + ValueTask IRedisClientAsync.ExpireEntryInAsync(string key, TimeSpan expireIn, CancellationToken token) + => UseMillisecondExpiration(expireIn) + ? NativeAsync.PExpireAsync(key, (long)expireIn.TotalMilliseconds, token) + : NativeAsync.ExpireAsync(key, (int)expireIn.TotalSeconds, token); + + ValueTask IRedisClientAsync.ExpireEntryAtAsync(string key, DateTime expireAt, CancellationToken token) + => AssertServerVersionNumber() >= 2600 + ? 
NativeAsync.PExpireAtAsync(key, ConvertToServerDate(expireAt).ToUnixTimeMs(), token) + : NativeAsync.ExpireAtAsync(key, ConvertToServerDate(expireAt).ToUnixTime(), token); + + Task ICacheClientAsync.GetTimeToLiveAsync(string key, CancellationToken token) + => NativeAsync.TtlAsync(key, token).Await(ParseTimeToLiveResult).AsTask(); + + ValueTask IRedisClientAsync.PingAsync(CancellationToken token) + => NativeAsync.PingAsync(token); + + ValueTask IRedisClientAsync.EchoAsync(string text, CancellationToken token) + => NativeAsync.EchoAsync(text, token); + + ValueTask IRedisClientAsync.ForegroundSaveAsync(CancellationToken token) + => NativeAsync.SaveAsync(token); + + ValueTask IRedisClientAsync.BackgroundSaveAsync(CancellationToken token) + => NativeAsync.BgSaveAsync(token); + + ValueTask IRedisClientAsync.ShutdownAsync(CancellationToken token) + => NativeAsync.ShutdownAsync(false, token); + + ValueTask IRedisClientAsync.ShutdownNoSaveAsync(CancellationToken token) + => NativeAsync.ShutdownAsync(true, token); + + ValueTask IRedisClientAsync.BackgroundRewriteAppendOnlyFileAsync(CancellationToken token) + => NativeAsync.BgRewriteAofAsync(token); + + ValueTask IRedisClientAsync.FlushDbAsync(CancellationToken token) + => NativeAsync.FlushDbAsync(token); + + ValueTask> IRedisClientAsync.GetValuesAsync(List keys, CancellationToken token) + { + if (keys == null) throw new ArgumentNullException(nameof(keys)); + if (keys.Count == 0) return new List().AsValueTaskResult(); + + return NativeAsync.MGetAsync(keys.ToArray(), token).Await(ParseGetValuesResult); + } + + ValueTask> IRedisClientAsync.GetValuesAsync(List keys, CancellationToken token) + { + if (keys == null) throw new ArgumentNullException(nameof(keys)); + if (keys.Count == 0) return new List().AsValueTaskResult(); + + return NativeAsync.MGetAsync(keys.ToArray(), token).Await(ParseGetValuesResult); + } + + ValueTask> IRedisClientAsync.GetValuesMapAsync(List keys, CancellationToken token) + { + if (keys == null) throw new ArgumentNullException(nameof(keys)); + if (keys.Count == 0) return new Dictionary().AsValueTaskResult(); + + var keysArray = keys.ToArray(); + return NativeAsync.MGetAsync(keysArray, token).Await((resultBytesArray, state) => ParseGetValuesMapResult(state, resultBytesArray), keysArray); + } + + ValueTask> IRedisClientAsync.GetValuesMapAsync(List keys, CancellationToken token) + { + if (keys == null) throw new ArgumentNullException(nameof(keys)); + if (keys.Count == 0) return new Dictionary().AsValueTaskResult(); + + var keysArray = keys.ToArray(); + return NativeAsync.MGetAsync(keysArray, token).Await((resultBytesArray, state) => ParseGetValuesMapResult(state, resultBytesArray), keysArray); + } + + ValueTask IRedisClientAsync.AcquireLockAsync(string key, TimeSpan? timeOut, CancellationToken token) + => RedisLock.CreateAsync(this, key, timeOut, token).Await(value => value); + + ValueTask IRedisClientAsync.SetValueAsync(string key, string value, TimeSpan expireIn, CancellationToken token) + { + var bytesValue = value?.ToUtf8Bytes(); + + if (AssertServerVersionNumber() >= 2610) + { + PickTime(expireIn, out var seconds, out var milliseconds); + return NativeAsync.SetAsync(key, bytesValue, expirySeconds: seconds, + expiryMilliseconds: milliseconds, token: token); + } + else + { + return NativeAsync.SetExAsync(key, (int)expireIn.TotalSeconds, bytesValue, token); + } + } + + static void PickTime(TimeSpan? 
value, out long expirySeconds, out long expiryMilliseconds) + { + expirySeconds = expiryMilliseconds = 0; + if (value.HasValue) + { + var expireIn = value.GetValueOrDefault(); + if (expireIn.Milliseconds > 0) + { + expiryMilliseconds = (long)expireIn.TotalMilliseconds; + } + else + { + expirySeconds = (long)expireIn.TotalSeconds; + } + } + } + ValueTask IRedisClientAsync.SetValueIfNotExistsAsync(string key, string value, TimeSpan? expireIn, CancellationToken token) + { + var bytesValue = value?.ToUtf8Bytes(); + PickTime(expireIn, out var seconds, out var milliseconds); + return NativeAsync.SetAsync(key, bytesValue, false, seconds, milliseconds, token); + } + + ValueTask IRedisClientAsync.SetValueIfExistsAsync(string key, string value, TimeSpan? expireIn, CancellationToken token) + { + var bytesValue = value?.ToUtf8Bytes(); + PickTime(expireIn, out var seconds, out var milliseconds); + return NativeAsync.SetAsync(key, bytesValue, true, seconds, milliseconds, token); + } + + ValueTask IRedisClientAsync.WatchAsync(string[] keys, CancellationToken token) + => NativeAsync.WatchAsync(keys, token); + + ValueTask IRedisClientAsync.UnWatchAsync(CancellationToken token) + => NativeAsync.UnWatchAsync(token); + + ValueTask IRedisClientAsync.AppendToValueAsync(string key, string value, CancellationToken token) + => NativeAsync.AppendAsync(key, value.ToUtf8Bytes(), token); + + async ValueTask IRedisClientAsync.StoreObjectAsync(object entity, CancellationToken token) + { + if (entity == null) throw new ArgumentNullException(nameof(entity)); + + var id = entity.GetObjectId(); + var entityType = entity.GetType(); + var urnKey = UrnKey(entityType, id); + var valueString = JsonSerializer.SerializeToString(entity); + + await ((IRedisClientAsync)this).SetValueAsync(urnKey, valueString, token).ConfigureAwait(false); + + await RegisterTypeIdAsync(GetTypeIdsSetKey(entityType), id.ToString(), token).ConfigureAwait(false); + + return entity; + } + + ValueTask IRedisClientAsync.PopItemFromSetAsync(string setId, CancellationToken token) + => NativeAsync.SPopAsync(setId, token).FromUtf8BytesAsync(); + + ValueTask> IRedisClientAsync.PopItemsFromSetAsync(string setId, int count, CancellationToken token) + => NativeAsync.SPopAsync(setId, count, token).ToStringListAsync(); + + ValueTask IRedisClientAsync.SlowlogResetAsync(CancellationToken token) + => NativeAsync.SlowlogResetAsync(token); + + ValueTask IRedisClientAsync.GetSlowlogAsync(int? 
numberOfRecords, CancellationToken token) + => NativeAsync.SlowlogGetAsync(numberOfRecords, token).Await(ParseSlowlog); + + + Task ICacheClientAsync.SetAsync(string key, T value, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), token: token)).AwaitAsTrueTask(); + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + ValueTask IRedisClientAsync.GetSortedSetCountAsync(string setId, CancellationToken token) + => NativeAsync.ZCardAsync(setId, token); + + ValueTask IRedisClientAsync.GetSortedSetCountAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetSortedSetCountAsync(setId, fromScore, toScore, token); + } + + ValueTask IRedisClientAsync.GetSortedSetCountAsync(string setId, double fromScore, double toScore, CancellationToken token) + => NativeAsync.ZCountAsync(setId, fromScore, toScore, token); + + ValueTask IRedisClientAsync.GetSortedSetCountAsync(string setId, long fromScore, long toScore, CancellationToken token) + => NativeAsync.ZCountAsync(setId, fromScore, toScore, token); + + ValueTask IRedisClientAsync.GetItemScoreInSortedSetAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZScoreAsync(setId, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.CustomAsync(object[] cmdWithArgs, CancellationToken token) + => RawCommandAsync(token, cmdWithArgs).Await(result => result.ToRedisText()); + + ValueTask IRedisClientAsync.SetValuesAsync(IDictionary map, CancellationToken token) + => ((IRedisClientAsync)this).SetAllAsync(map, token); + + Task ICacheClientAsync.SetAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + AssertNotInTransaction(); + return ExecAsync(async r => + { + await r.SetAsync(key, value, token).ConfigureAwait(false); + await r.ExpireEntryAtAsync(key, ConvertToServerDate(expiresAt), token).ConfigureAwait(false); + }).AwaitAsTrueTask(); + } + Task ICacheClientAsync.SetAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + if (AssertServerVersionNumber() >= 2600) + { + return ExecAsync(r => ((IRedisNativeClientAsync)r) + .SetAsync(key, ToBytes(value), 0, expiryMilliseconds: (long)expiresIn.TotalMilliseconds, token)).AwaitAsTrueTask(); + } + else + { + return ExecAsync(r => ((IRedisNativeClientAsync)r) + .SetExAsync(key, (int)expiresIn.TotalSeconds, ToBytes(value), token)).AwaitAsTrueTask(); + } + } + + Task ICacheClientAsync.FlushAllAsync(CancellationToken token) + => NativeAsync.FlushAllAsync(token).AsTask(); + + Task> ICacheClientAsync.GetAllAsync(IEnumerable keys, CancellationToken token) + { + return ExecAsync(r => + { + var keysArray = keys.ToArray(); + + return ((IRedisNativeClientAsync)r).MGetAsync(keysArray, token).Await((keyValues, state) => ProcessGetAllResult(state, keyValues), keysArray); + }).AsTask(); + } + + Task ICacheClientAsync.RemoveAsync(string key, CancellationToken token) + => NativeAsync.DelAsync(key, token).IsSuccessTaskAsync(); + + IAsyncEnumerable ICacheClientAsync.GetKeysByPatternAsync(string pattern, CancellationToken token) + => AsAsync().ScanAllKeysAsync(pattern, token: token); + + Task ICacheClientAsync.RemoveExpiredEntriesAsync(CancellationToken token) + { + //Redis automatically removed expired Cache Entries + return Task.CompletedTask; + } + + async Task IRemoveByPatternAsync.RemoveByPatternAsync(string pattern, 
CancellationToken token) + { + List buffer = null; + const int BATCH_SIZE = 1024; + await foreach (var key in AsAsync().ScanAllKeysAsync(pattern, token: token).WithCancellation(token).ConfigureAwait(false)) + { + (buffer ??= new List()).Add(key); + if (buffer.Count == BATCH_SIZE) + { + await NativeAsync.DelAsync(buffer.ToArray(), token).ConfigureAwait(false); + buffer.Clear(); + } + } + if (buffer is object && buffer.Count != 0) + { + await NativeAsync.DelAsync(buffer.ToArray(), token).ConfigureAwait(false); + } + } + + Task IRemoveByPatternAsync.RemoveByRegexAsync(string regex, CancellationToken token) + => AsAsync().RemoveByPatternAsync(RegexToGlob(regex), token); + + Task ICacheClientAsync.RemoveAllAsync(IEnumerable keys, CancellationToken token) + => ExecAsync(r => r.RemoveEntryAsync(keys.ToArray(), token)).AsTask(); + + Task ICacheClientAsync.IncrementAsync(string key, uint amount, CancellationToken token) + => ExecAsync(r => r.IncrementValueByAsync(key, (int)amount, token)).AsTask(); + + Task ICacheClientAsync.DecrementAsync(string key, uint amount, CancellationToken token) + => ExecAsync(r => r.DecrementValueByAsync(key, (int)amount, token)).AsTask(); + + + Task ICacheClientAsync.AddAsync(string key, T value, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), exists: false, token: token)).AsTask(); + + Task ICacheClientAsync.ReplaceAsync(string key, T value, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), exists: true, token: token)).AsTask(); + + Task ICacheClientAsync.AddAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + AssertNotInTransaction(); + + return ExecAsync(async r => + { + if (await r.AddAsync(key, value, token).ConfigureAwait(false)) + { + await r.ExpireEntryAtAsync(key, ConvertToServerDate(expiresAt), token).ConfigureAwait(false); + return true; + } + return false; + }).AsTask(); + } + + Task ICacheClientAsync.ReplaceAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + AssertNotInTransaction(); + + return ExecAsync(async r => + { + if (await r.ReplaceAsync(key, value, token).ConfigureAwait(false)) + { + await r.ExpireEntryAtAsync(key, ConvertToServerDate(expiresAt), token).ConfigureAwait(false); + return true; + } + return false; + }).AsTask(); + } + + Task ICacheClientAsync.AddAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), exists: false, token: token)).AsTask(); + + Task ICacheClientAsync.ReplaceAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + => ExecAsync(r => ((IRedisNativeClientAsync)r).SetAsync(key, ToBytes(value), exists: true, token: token)).AsTask(); + + ValueTask IRedisClientAsync.DbSizeAsync(CancellationToken token) + => NativeAsync.DbSizeAsync(token); + + ValueTask> IRedisClientAsync.InfoAsync(CancellationToken token) + => NativeAsync.InfoAsync(token); + + ValueTask IRedisClientAsync.LastSaveAsync(CancellationToken token) + => NativeAsync.LastSaveAsync(token); + + async Task IEntityStoreAsync.GetByIdAsync(object id, CancellationToken token) + { + var key = UrnKey(id); + var valueString = await AsAsync().GetValueAsync(key, token).ConfigureAwait(false); + var value = JsonSerializer.DeserializeFromString(valueString); + return value; + } + + async Task> IEntityStoreAsync.GetByIdsAsync(ICollection ids, CancellationToken token) + { + if (ids == null || ids.Count == 0) + return 
new List(); + + var urnKeys = ids.Cast().Map(UrnKey); + return await AsAsync().GetValuesAsync(urnKeys, token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.StoreAsync(T entity, CancellationToken token) + { + var urnKey = UrnKey(entity); + var valueString = JsonSerializer.SerializeToString(entity); + + await AsAsync().SetValueAsync(urnKey, valueString, token).ConfigureAwait(false); + await RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + + return entity; + } + + Task IEntityStoreAsync.StoreAllAsync(IEnumerable entities, CancellationToken token) + => StoreAllAsyncImpl(entities, token).AsTask(); + + internal async ValueTask StoreAllAsyncImpl(IEnumerable entities, CancellationToken token) + { + if (PrepareStoreAll(entities, out var keys, out var values, out var entitiesList)) + { + await NativeAsync.MSetAsync(keys, values, token).ConfigureAwait(false); + await RegisterTypeIdsAsync(entitiesList, token).ConfigureAwait(false); + } + } + + internal ValueTask RegisterTypeIdsAsync(IEnumerable values, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + var ids = values.Map(x => x.GetId().ToString()); + + if (this.Pipeline != null) + { + var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); + ids.ForEach(x => registeredTypeIdsWithinPipeline.Add(x)); + return default; + } + else + { + return AsAsync().AddRangeToSetAsync(typeIdsSetKey, ids, token); + } + } + + internal ValueTask RemoveTypeIdsByValueAsync(T value, CancellationToken token) => + RemoveTypeIdsByIdAsync(value.GetId().ToString(), token); + internal async ValueTask RemoveTypeIdsByValuesAsync(T[] values, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + if (this.Pipeline != null) + { + var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); + values.Each(x => registeredTypeIdsWithinPipeline.Remove(x.GetId().ToString())); + } + else + { + foreach (var x in values) + { + await AsAsync().RemoveItemFromSetAsync(typeIdsSetKey, x.GetId().ToString(), token).ConfigureAwait(false); + } + } + } + + internal async ValueTask RemoveTypeIdsByIdAsync(string id, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + if (this.Pipeline != null) + GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey).Remove(id); + else + { + await AsAsync().RemoveItemFromSetAsync(typeIdsSetKey, id, token).ConfigureAwait(false); + } + } + + internal async ValueTask RemoveTypeIdsByIdsAsync(string[] ids, CancellationToken token) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + if (this.Pipeline != null) + { + var registeredTypeIdsWithinPipeline = GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey); + ids.Each(x => registeredTypeIdsWithinPipeline.Remove(x)); + } + else + { + foreach (var x in ids) + { + await AsAsync().RemoveItemFromSetAsync(typeIdsSetKey, x, token).ConfigureAwait(false); + } + } + } + + async Task IEntityStoreAsync.DeleteAsync(T entity, CancellationToken token) + { + var urnKey = UrnKey(entity); + await AsAsync().RemoveAsync(urnKey, token).ConfigureAwait(false); + await this.RemoveTypeIdsByValueAsync(entity, token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteByIdAsync(object id, CancellationToken token) + { + var urnKey = UrnKey(id); + await AsAsync().RemoveAsync(urnKey, token).ConfigureAwait(false); + await this.RemoveTypeIdsByIdAsync(id.ToString(), token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteByIdsAsync(ICollection ids, CancellationToken token) + { + if (ids == null || 
ids.Count == 0) return; + + var idStrings = ids.Cast().Select(x => x.ToString()).ToArray(); + var urnKeys = idStrings.Select(UrnKey).ToArray(); + await AsAsync().RemoveEntryAsync(urnKeys, token).ConfigureAwait(false); + await this.RemoveTypeIdsByIdsAsync(idStrings, token).ConfigureAwait(false); + } + + async Task IEntityStoreAsync.DeleteAllAsync(CancellationToken token) + { + await DeleteAllAsync(0, RedisConfig.CommandKeysBatchSize, token).ConfigureAwait(false); + } + + private async Task DeleteAllAsync(ulong cursor, int batchSize, CancellationToken token) + { + var typeIdsSetKey = this.GetTypeIdsSetKey(); + var asyncClient = AsAsync(); + do + { + var scanResult = await NativeAsync.SScanAsync(typeIdsSetKey, cursor, batchSize, token: token).ConfigureAwait(false); + cursor = scanResult.Cursor; + var urnKeys = scanResult.Results.Select(id => UrnKey(id.FromUtf8Bytes())).ToArray(); + if (urnKeys.Length > 0) + { + await asyncClient.RemoveEntryAsync(urnKeys, token).ConfigureAwait(false); + } + } while (cursor != 0); + await asyncClient.RemoveEntryAsync(new[] { typeIdsSetKey }, token).ConfigureAwait(false); + } + + ValueTask> IRedisClientAsync.SearchSortedSetAsync(string setId, string start, string end, int? skip, int? take, CancellationToken token) + { + start = GetSearchStart(start); + end = GetSearchEnd(end); + + return NativeAsync.ZRangeByLexAsync(setId, start, end, skip, take, token).ToStringListAsync(); + } + + ValueTask IRedisClientAsync.SearchSortedSetCountAsync(string setId, string start, string end, CancellationToken token) + => NativeAsync.ZLexCountAsync(setId, GetSearchStart(start), GetSearchEnd(end), token); + + ValueTask IRedisClientAsync.RemoveRangeFromSortedSetBySearchAsync(string setId, string start, string end, CancellationToken token) + => NativeAsync.ZRemRangeByLexAsync(setId, GetSearchStart(start), GetSearchEnd(end), token); + + ValueTask IRedisClientAsync.TypeAsync(string key, CancellationToken token) + => NativeAsync.TypeAsync(key, token); + + ValueTask IRedisClientAsync.GetStringCountAsync(string key, CancellationToken token) + => NativeAsync.StrLenAsync(key, token); + + ValueTask IRedisClientAsync.GetSetCountAsync(string setId, CancellationToken token) + => NativeAsync.SCardAsync(setId, token); + + ValueTask IRedisClientAsync.GetListCountAsync(string listId, CancellationToken token) + => NativeAsync.LLenAsync(listId, token); + + ValueTask IRedisClientAsync.GetHashCountAsync(string hashId, CancellationToken token) + => NativeAsync.HLenAsync(hashId, token); + + async ValueTask IRedisClientAsync.ExecCachedLuaAsync(string scriptBody, Func> scriptSha1, CancellationToken token) + { + if (!CachedLuaSha1Map.TryGetValue(scriptBody, out var sha1)) + CachedLuaSha1Map[scriptBody] = sha1 = await AsAsync().LoadLuaScriptAsync(scriptBody, token).ConfigureAwait(false); + + try + { + return await scriptSha1(sha1).ConfigureAwait(false); + } + catch (RedisResponseException ex) + { + if (!ex.Message.StartsWith("NOSCRIPT")) + throw; + + CachedLuaSha1Map[scriptBody] = sha1 = await AsAsync().LoadLuaScriptAsync(scriptBody, token).ConfigureAwait(false); + return await scriptSha1(sha1).ConfigureAwait(false); + } + } + + ValueTask IRedisClientAsync.ExecLuaAsync(string luaBody, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalCommandAsync(luaBody, keys?.Length ?? 
0, MergeAndConvertToBytes(keys, args), token).Await(data => data.ToRedisText()); + + ValueTask IRedisClientAsync.ExecLuaShaAsync(string sha1, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalShaCommandAsync(sha1, keys?.Length ?? 0, MergeAndConvertToBytes(keys, args), token).Await(data => data.ToRedisText()); + + ValueTask IRedisClientAsync.ExecLuaAsStringAsync(string luaBody, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalStrAsync(luaBody, keys?.Length ?? 0, MergeAndConvertToBytes(keys, args), token); + + ValueTask IRedisClientAsync.ExecLuaShaAsStringAsync(string sha1, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalShaStrAsync(sha1, keys?.Length ?? 0, MergeAndConvertToBytes(keys, args), token); + + ValueTask IRedisClientAsync.LoadLuaScriptAsync(string body, CancellationToken token) + => NativeAsync.ScriptLoadAsync(body, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.WriteAllAsync(IEnumerable entities, CancellationToken token) + => PrepareWriteAll(entities, out var keys, out var values) ? NativeAsync.MSetAsync(keys, values, token) : default; + + async ValueTask> IRedisClientAsync.GetAllItemsFromSetAsync(string setId, CancellationToken token) + { + var multiDataList = await NativeAsync.SMembersAsync(setId, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + async ValueTask IRedisClientAsync.AddRangeToSetAsync(string setId, List items, CancellationToken token) + { + if (await AddRangeToSetNeedsSendAsync(setId, items).ConfigureAwait(false)) + { + var uSetId = setId.ToUtf8Bytes(); + var pipeline = CreatePipelineCommand(); + foreach (var item in items) + { + pipeline.WriteCommand(Commands.SAdd, uSetId, item.ToUtf8Bytes()); + } + await pipeline.FlushAsync(token).ConfigureAwait(false); + + //the number of items after + _ = await pipeline.ReadAllAsIntsAsync(token).ConfigureAwait(false); + } + } + + async ValueTask AddRangeToSetNeedsSendAsync(string setId, List items) + { + if (setId.IsNullOrEmpty()) + throw new ArgumentNullException("setId"); + if (items == null) + throw new ArgumentNullException("items"); + if (items.Count == 0) + return false; + + if (this.Transaction is object || this.PipelineAsync is object) + { + var queueable = this.Transaction as IRedisQueueableOperationAsync + ?? 
this.Pipeline as IRedisQueueableOperationAsync; + + if (queueable == null) + throw new NotSupportedException("Cannot AddRangeToSetAsync() when Transaction is: " + this.Transaction.GetType().Name); + + //Complete the first QueuedCommand() + await AsAsync().AddItemToSetAsync(setId, items[0]).ConfigureAwait(false); + + //Add subsequent queued commands + for (var i = 1; i < items.Count; i++) + { + var item = items[i]; + queueable.QueueCommand(c => c.AddItemToSetAsync(setId, item)); + } + return false; + } + else + { + return true; + } + } + + ValueTask IRedisClientAsync.RemoveItemFromSetAsync(string setId, string item, CancellationToken token) + => NativeAsync.SRemAsync(setId, item.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.IncrementValueByAsync(string key, int count, CancellationToken token) + => NativeAsync.IncrByAsync(key, count, token); + + ValueTask IRedisClientAsync.IncrementValueByAsync(string key, long count, CancellationToken token) + => NativeAsync.IncrByAsync(key, count, token); + + ValueTask IRedisClientAsync.IncrementValueByAsync(string key, double count, CancellationToken token) + => NativeAsync.IncrByFloatAsync(key, count, token); + ValueTask IRedisClientAsync.IncrementValueAsync(string key, CancellationToken token) + => NativeAsync.IncrAsync(key, token); + + ValueTask IRedisClientAsync.DecrementValueAsync(string key, CancellationToken token) + => NativeAsync.DecrAsync(key, token); + + ValueTask IRedisClientAsync.DecrementValueByAsync(string key, int count, CancellationToken token) + => NativeAsync.DecrByAsync(key, count, token); + + async ValueTask IRedisClientAsync.GetServerRoleAsync(CancellationToken token) + { + if (AssertServerVersionNumber() >= 2812) + { + var text = await NativeAsync.RoleAsync(token).ConfigureAwait(false); + var roleName = text.Children[0].Text; + return ToServerRole(roleName); + } + + var info = await AsAsync().InfoAsync(token).ConfigureAwait(false); + info.TryGetValue("role", out var role); + return ToServerRole(role); + } + + ValueTask IRedisClientAsync.GetServerRoleInfoAsync(CancellationToken token) + => NativeAsync.RoleAsync(token); + + async ValueTask IRedisClientAsync.GetConfigAsync(string configItem, CancellationToken token) + { + var byteArray = await NativeAsync.ConfigGetAsync(configItem, token).ConfigureAwait(false); + return GetConfigParse(byteArray); + } + + ValueTask IRedisClientAsync.SetConfigAsync(string configItem, string value, CancellationToken token) + => NativeAsync.ConfigSetAsync(configItem, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.SaveConfigAsync(CancellationToken token) + => NativeAsync.ConfigRewriteAsync(token); + + ValueTask IRedisClientAsync.ResetInfoStatsAsync(CancellationToken token) + => NativeAsync.ConfigResetStatAsync(token); + + ValueTask IRedisClientAsync.GetClientAsync(CancellationToken token) + => NativeAsync.ClientGetNameAsync(token); + + ValueTask IRedisClientAsync.SetClientAsync(string name, CancellationToken token) + => NativeAsync.ClientSetNameAsync(name, token); + + ValueTask IRedisClientAsync.KillClientAsync(string address, CancellationToken token) + => NativeAsync.ClientKillAsync(address, token); + + ValueTask IRedisClientAsync.KillClientsAsync(string fromAddress, string withId, RedisClientType? ofType, bool? skipMe, CancellationToken token) + { + var typeString = ofType?.ToString().ToLower(); + var skipMeString = skipMe.HasValue ? (skipMe.Value ? 
"yes" : "no") : null; + return NativeAsync.ClientKillAsync(addr: fromAddress, id: withId, type: typeString, skipMe: skipMeString, token); + } + + async ValueTask>> IRedisClientAsync.GetClientsInfoAsync(CancellationToken token) + => GetClientsInfoParse(await NativeAsync.ClientListAsync(token).ConfigureAwait(false)); + + ValueTask IRedisClientAsync.PauseAllClientsAsync(TimeSpan duration, CancellationToken token) + => NativeAsync.ClientPauseAsync((int)duration.TotalMilliseconds, token); + + ValueTask> IRedisClientAsync.GetAllKeysAsync(CancellationToken token) + => AsAsync().SearchKeysAsync("*", token); + + ValueTask IRedisClientAsync.GetAndSetValueAsync(string key, string value, CancellationToken token) + => NativeAsync.GetSetAsync(key, value.ToUtf8Bytes(), token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.GetFromHashAsync(object id, CancellationToken token) + { + var key = UrnKey(id); + return (await AsAsync().GetAllEntriesFromHashAsync(key, token).ConfigureAwait(false)).ToJson().FromJson(); + } + + async ValueTask IRedisClientAsync.StoreAsHashAsync(T entity, CancellationToken token) + { + var key = UrnKey(entity); + var hash = ConvertToHashFn(entity); + await AsAsync().SetRangeInHashAsync(key, hash, token).ConfigureAwait(false); + await RegisterTypeIdAsync(entity, token).ConfigureAwait(false); + } + + ValueTask> IRedisClientAsync.GetSortedEntryValuesAsync(string setId, int startingFrom, int endingAt, CancellationToken token) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, }; + return NativeAsync.SortAsync(setId, sortOptions, token).ToStringListAsync(); + } + + async IAsyncEnumerable IRedisClientAsync.ScanAllSetItemsAsync(string setId, string pattern, int pageSize, [EnumeratorCancellation] CancellationToken token) + { + var ret = new ScanResult(); + while (true) + { + ret = await (pattern != null // note ConfigureAwait is handled below + ? NativeAsync.SScanAsync(setId, ret.Cursor, pageSize, match: pattern, token: token) + : NativeAsync.SScanAsync(setId, ret.Cursor, pageSize, token: token) + ).ConfigureAwait(false); + + foreach (var key in ret.Results) + { + yield return key.FromUtf8Bytes(); + } + + if (ret.Cursor == 0) break; + } + } + + async IAsyncEnumerable> IRedisClientAsync.ScanAllSortedSetItemsAsync(string setId, string pattern, int pageSize, [EnumeratorCancellation] CancellationToken token) + { + var ret = new ScanResult(); + while (true) + { + ret = await (pattern != null // note ConfigureAwait is handled below + ? NativeAsync.ZScanAsync(setId, ret.Cursor, pageSize, match: pattern, token: token) + : NativeAsync.ZScanAsync(setId, ret.Cursor, pageSize, token: token) + ).ConfigureAwait(false); + + foreach (var entry in ret.AsItemsWithScores()) + { + yield return entry; + } + + if (ret.Cursor == 0) break; + } + } + + async IAsyncEnumerable> IRedisClientAsync.ScanAllHashEntriesAsync(string hashId, string pattern, int pageSize, [EnumeratorCancellation] CancellationToken token) + { + var ret = new ScanResult(); + while (true) + { + ret = await (pattern != null // note ConfigureAwait is handled below + ? 
NativeAsync.HScanAsync(hashId, ret.Cursor, pageSize, match: pattern, token: token) + : NativeAsync.HScanAsync(hashId, ret.Cursor, pageSize, token: token) + ).ConfigureAwait(false); + + foreach (var entry in ret.AsKeyValues()) + { + yield return entry; + } + + if (ret.Cursor == 0) break; + } + } + + ValueTask IRedisClientAsync.AddToHyperLogAsync(string key, string[] elements, CancellationToken token) + => NativeAsync.PfAddAsync(key, elements.Map(x => x.ToUtf8Bytes()).ToArray(), token); + + ValueTask IRedisClientAsync.CountHyperLogAsync(string key, CancellationToken token) + => NativeAsync.PfCountAsync(key, token); + + ValueTask IRedisClientAsync.MergeHyperLogsAsync(string toKey, string[] fromKeys, CancellationToken token) + => NativeAsync.PfMergeAsync(toKey, fromKeys, token); + + ValueTask IRedisClientAsync.AddGeoMemberAsync(string key, double longitude, double latitude, string member, CancellationToken token) + => NativeAsync.GeoAddAsync(key, longitude, latitude, member, token); + + ValueTask IRedisClientAsync.AddGeoMembersAsync(string key, RedisGeo[] geoPoints, CancellationToken token) + => NativeAsync.GeoAddAsync(key, geoPoints, token); + + ValueTask IRedisClientAsync.CalculateDistanceBetweenGeoMembersAsync(string key, string fromMember, string toMember, string unit, CancellationToken token) + => NativeAsync.GeoDistAsync(key, fromMember, toMember, unit, token); + + ValueTask IRedisClientAsync.GetGeohashesAsync(string key, string[] members, CancellationToken token) + => NativeAsync.GeoHashAsync(key, members, token); + + ValueTask> IRedisClientAsync.GetGeoCoordinatesAsync(string key, string[] members, CancellationToken token) + => NativeAsync.GeoPosAsync(key, members, token); + + async ValueTask IRedisClientAsync.FindGeoMembersInRadiusAsync(string key, double longitude, double latitude, double radius, string unit, CancellationToken token) + { + var results = await NativeAsync.GeoRadiusAsync(key, longitude, latitude, radius, unit, token: token).ConfigureAwait(false); + return ParseFindGeoMembersResult(results); + } + + ValueTask> IRedisClientAsync.FindGeoResultsInRadiusAsync(string key, double longitude, double latitude, double radius, string unit, int? count, bool? sortByNearest, CancellationToken token) + => NativeAsync.GeoRadiusAsync(key, longitude, latitude, radius, unit, withCoords: true, withDist: true, withHash: true, count: count, asc: sortByNearest, token: token); + + async ValueTask IRedisClientAsync.FindGeoMembersInRadiusAsync(string key, string member, double radius, string unit, CancellationToken token) + { + var results = await NativeAsync.GeoRadiusByMemberAsync(key, member, radius, unit, token: token).ConfigureAwait(false); + return ParseFindGeoMembersResult(results); + } + + ValueTask> IRedisClientAsync.FindGeoResultsInRadiusAsync(string key, string member, double radius, string unit, int? count, bool? 
sortByNearest, CancellationToken token) + => NativeAsync.GeoRadiusByMemberAsync(key, member, radius, unit, withCoords: true, withDist: true, withHash: true, count: count, asc: sortByNearest, token: token); + + ValueTask IRedisClientAsync.CreateSubscriptionAsync(CancellationToken token) + => new RedisSubscription(this).AsValueTaskResult(); + + ValueTask IRedisClientAsync.PublishMessageAsync(string toChannel, string message, CancellationToken token) + => NativeAsync.PublishAsync(toChannel, message.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.MoveBetweenSetsAsync(string fromSetId, string toSetId, string item, CancellationToken token) + => NativeAsync.SMoveAsync(fromSetId, toSetId, item.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.SetContainsItemAsync(string setId, string item, CancellationToken token) + => NativeAsync.SIsMemberAsync(setId, item.ToUtf8Bytes(), token).IsSuccessAsync(); + + async ValueTask> IRedisClientAsync.GetIntersectFromSetsAsync(string[] setIds, CancellationToken token) + { + if (setIds.Length == 0) + return new HashSet(); + + var multiDataList = await NativeAsync.SInterAsync(setIds, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisClientAsync.StoreIntersectFromSetsAsync(string intoSetId, string[] setIds, CancellationToken token) + { + if (setIds.Length == 0) return default; + + return NativeAsync.SInterStoreAsync(intoSetId, setIds, token); + } + + async ValueTask> IRedisClientAsync.GetUnionFromSetsAsync(string[] setIds, CancellationToken token) + { + if (setIds.Length == 0) + return new HashSet(); + + var multiDataList = await NativeAsync.SUnionAsync(setIds, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisClientAsync.StoreUnionFromSetsAsync(string intoSetId, string[] setIds, CancellationToken token) + { + if (setIds.Length == 0) return default; + + return NativeAsync.SUnionStoreAsync(intoSetId, setIds, token); + } + + async ValueTask> IRedisClientAsync.GetDifferencesFromSetAsync(string fromSetId, string[] withSetIds, CancellationToken token) + { + if (withSetIds.Length == 0) + return new HashSet(); + + var multiDataList = await NativeAsync.SDiffAsync(fromSetId, withSetIds, token).ConfigureAwait(false); + return CreateHashSet(multiDataList); + } + + ValueTask IRedisClientAsync.StoreDifferencesFromSetAsync(string intoSetId, string fromSetId, string[] withSetIds, CancellationToken token) + { + if (withSetIds.Length == 0) return default; + + return NativeAsync.SDiffStoreAsync(intoSetId, fromSetId, withSetIds, token); + } + + ValueTask IRedisClientAsync.GetRandomItemFromSetAsync(string setId, CancellationToken token) + => NativeAsync.SRandMemberAsync(setId, token).FromUtf8BytesAsync(); + + ValueTask> IRedisClientAsync.GetAllItemsFromListAsync(string listId, CancellationToken token) + => NativeAsync.LRangeAsync(listId, FirstElement, LastElement, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromListAsync(string listId, int startingFrom, int endingAt, CancellationToken token) + => NativeAsync.LRangeAsync(listId, startingFrom, endingAt, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedListAsync(string listId, int startingFrom, int endingAt, CancellationToken token) + { + var sortOptions = new SortOptions { Skip = startingFrom, Take = endingAt, SortAlpha = true }; + return AsAsync().GetSortedItemsFromListAsync(listId, sortOptions, token); + } + + ValueTask> IRedisClientAsync.GetSortedItemsFromListAsync(string listId, 
SortOptions sortOptions, CancellationToken token) + => NativeAsync.SortAsync(listId, sortOptions, token).ToStringListAsync(); + + async ValueTask IRedisClientAsync.AddRangeToListAsync(string listId, List values, CancellationToken token) + { + var pipeline = AddRangeToListPrepareNonFlushed(listId, values); + await pipeline.FlushAsync(token).ConfigureAwait(false); + + //the number of items after + _ = await pipeline.ReadAllAsIntsAsync(token).ConfigureAwait(false); + } + + ValueTask IRedisClientAsync.PrependItemToListAsync(string listId, string value, CancellationToken token) + => NativeAsync.LPushAsync(listId, value.ToUtf8Bytes(), token).Await(); + + async ValueTask IRedisClientAsync.PrependRangeToListAsync(string listId, List values, CancellationToken token) + { + var pipeline = PrependRangeToListPrepareNonFlushed(listId, values); + await pipeline.FlushAsync(token).ConfigureAwait(false); + + //the number of items after + _ = await pipeline.ReadAllAsIntsAsync(token).ConfigureAwait(false); + } + + ValueTask IRedisClientAsync.RemoveAllFromListAsync(string listId, CancellationToken token) + => NativeAsync.LTrimAsync(listId, LastElement, FirstElement, token); + + ValueTask IRedisClientAsync.RemoveStartFromListAsync(string listId, CancellationToken token) + => NativeAsync.LPopAsync(listId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.BlockingRemoveStartFromListAsync(string listId, TimeSpan? timeOut, CancellationToken token) + => NativeAsync.BLPopValueAsync(listId, (int)timeOut.GetValueOrDefault().TotalSeconds, token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.BlockingRemoveStartFromListsAsync(string[] listIds, TimeSpan? timeOut, CancellationToken token) + { + var value = await NativeAsync.BLPopValueAsync(listIds, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + if (value == null) + return null; + return new ItemRef { Id = value[0].FromUtf8Bytes(), Item = value[1].FromUtf8Bytes() }; + } + + ValueTask IRedisClientAsync.RemoveEndFromListAsync(string listId, CancellationToken token) + => NativeAsync.RPopAsync(listId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.TrimListAsync(string listId, int keepStartingFrom, int keepEndingAt, CancellationToken token) + => NativeAsync.LTrimAsync(listId, keepStartingFrom, keepEndingAt, token); + + ValueTask IRedisClientAsync.RemoveItemFromListAsync(string listId, string value, CancellationToken token) + => NativeAsync.LRemAsync(listId, 0, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.RemoveItemFromListAsync(string listId, string value, int noOfMatches, CancellationToken token) + => NativeAsync.LRemAsync(listId, 0, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.GetItemFromListAsync(string listId, int listIndex, CancellationToken token) + => NativeAsync.LIndexAsync(listId, listIndex, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.SetItemInListAsync(string listId, int listIndex, string value, CancellationToken token) + => NativeAsync.LSetAsync(listId, listIndex, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.EnqueueItemOnListAsync(string listId, string value, CancellationToken token) + => NativeAsync.LPushAsync(listId, value.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.DequeueItemFromListAsync(string listId, CancellationToken token) + => NativeAsync.RPopAsync(listId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.BlockingDequeueItemFromListAsync(string listId, TimeSpan? 
timeOut, CancellationToken token) + => NativeAsync.BRPopValueAsync(listId, (int)timeOut.GetValueOrDefault().TotalSeconds, token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.BlockingDequeueItemFromListsAsync(string[] listIds, TimeSpan? timeOut, CancellationToken token) + { + var value = await NativeAsync.BRPopValueAsync(listIds, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + if (value == null) + return null; + return new ItemRef { Id = value[0].FromUtf8Bytes(), Item = value[1].FromUtf8Bytes() }; + } + + ValueTask IRedisClientAsync.PushItemToListAsync(string listId, string value, CancellationToken token) + => NativeAsync.RPushAsync(listId, value.ToUtf8Bytes(), token).Await(); + + ValueTask IRedisClientAsync.PopItemFromListAsync(string listId, CancellationToken token) + => NativeAsync.RPopAsync(listId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.BlockingPopItemFromListAsync(string listId, TimeSpan? timeOut, CancellationToken token) + => NativeAsync.BRPopValueAsync(listId, (int)timeOut.GetValueOrDefault().TotalSeconds, token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.BlockingPopItemFromListsAsync(string[] listIds, TimeSpan? timeOut, CancellationToken token) + { + var value = await NativeAsync.BRPopValueAsync(listIds, (int)timeOut.GetValueOrDefault().TotalSeconds, token).ConfigureAwait(false); + if (value == null) + return null; + return new ItemRef { Id = value[0].FromUtf8Bytes(), Item = value[1].FromUtf8Bytes() }; + } + + ValueTask IRedisClientAsync.PopAndPushItemBetweenListsAsync(string fromListId, string toListId, CancellationToken token) + => NativeAsync.RPopLPushAsync(fromListId, toListId, token).FromUtf8BytesAsync(); + + ValueTask IRedisClientAsync.BlockingPopAndPushItemBetweenListsAsync(string fromListId, string toListId, TimeSpan? 
timeOut, CancellationToken token) + => NativeAsync.BRPopLPushAsync(fromListId, toListId, (int)timeOut.GetValueOrDefault().TotalSeconds, token).FromUtf8BytesAsync(); + + async ValueTask IRedisClientAsync.AddRangeToSortedSetAsync(string setId, List values, double score, CancellationToken token) + { + var pipeline = AddRangeToSortedSetPrepareNonFlushed(setId, values, score.ToFastUtf8Bytes()); + await pipeline.FlushAsync(token).ConfigureAwait(false); + + return await pipeline.ReadAllAsIntsHaveSuccessAsync(token).ConfigureAwait(false); + } + + async ValueTask IRedisClientAsync.AddRangeToSortedSetAsync(string setId, List values, long score, CancellationToken token) + { + var pipeline = AddRangeToSortedSetPrepareNonFlushed(setId, values, score.ToUtf8Bytes()); + await pipeline.FlushAsync(token).ConfigureAwait(false); + + return await pipeline.ReadAllAsIntsHaveSuccessAsync(token).ConfigureAwait(false); + } + + ValueTask IRedisClientAsync.RemoveItemFromSortedSetAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZRemAsync(setId, value.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.RemoveItemsFromSortedSetAsync(string setId, List values, CancellationToken token) + => NativeAsync.ZRemAsync(setId, values.Map(x => x.ToUtf8Bytes()).ToArray(), token); + + async ValueTask IRedisClientAsync.PopItemWithLowestScoreFromSortedSetAsync(string setId, CancellationToken token) + { + //TODO: this should be atomic + var topScoreItemBytes = await NativeAsync.ZRangeAsync(setId, FirstElement, 1, token).ConfigureAwait(false); + if (topScoreItemBytes.Length == 0) return null; + + await NativeAsync.ZRemAsync(setId, topScoreItemBytes[0], token).ConfigureAwait(false); + return topScoreItemBytes[0].FromUtf8Bytes(); + } + + async ValueTask IRedisClientAsync.PopItemWithHighestScoreFromSortedSetAsync(string setId, CancellationToken token) + { + //TODO: this should be atomic + var topScoreItemBytes = await NativeAsync.ZRevRangeAsync(setId, FirstElement, 1, token).ConfigureAwait(false); + if (topScoreItemBytes.Length == 0) return null; + + await NativeAsync.ZRemAsync(setId, topScoreItemBytes[0], token).ConfigureAwait(false); + return topScoreItemBytes[0].FromUtf8Bytes(); + } + + ValueTask IRedisClientAsync.SortedSetContainsItemAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZRankAsync(setId, value.ToUtf8Bytes(), token).Await(val => val != -1); + + ValueTask IRedisClientAsync.IncrementItemInSortedSetAsync(string setId, string value, double incrementBy, CancellationToken token) + => NativeAsync.ZIncrByAsync(setId, incrementBy, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.IncrementItemInSortedSetAsync(string setId, string value, long incrementBy, CancellationToken token) + => NativeAsync.ZIncrByAsync(setId, incrementBy, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.GetItemIndexInSortedSetAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZRankAsync(setId, value.ToUtf8Bytes(), token); + + ValueTask IRedisClientAsync.GetItemIndexInSortedSetDescAsync(string setId, string value, CancellationToken token) + => NativeAsync.ZRevRankAsync(setId, value.ToUtf8Bytes(), token); + + ValueTask> IRedisClientAsync.GetAllItemsFromSortedSetAsync(string setId, CancellationToken token) + => NativeAsync.ZRangeAsync(setId, FirstElement, LastElement, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetAllItemsFromSortedSetDescAsync(string setId, CancellationToken token) + => NativeAsync.ZRevRangeAsync(setId, 
FirstElement, LastElement, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetAsync(string setId, int fromRank, int toRank, CancellationToken token) + => NativeAsync.ZRangeAsync(setId, fromRank, toRank, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetDescAsync(string setId, int fromRank, int toRank, CancellationToken token) + => NativeAsync.ZRevRangeAsync(setId, fromRank, toRank, token).ToStringListAsync(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ValueTask> CreateSortedScoreMapAsync(ValueTask pending) + { + return pending.IsCompletedSuccessfully ? CreateSortedScoreMap(pending.Result).AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask pending) + => CreateSortedScoreMap(await pending.ConfigureAwait(false)); + } + + ValueTask> IRedisClientAsync.GetAllWithScoresFromSortedSetAsync(string setId, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRangeWithScoresAsync(setId, FirstElement, LastElement, token)); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetAsync(string setId, int fromRank, int toRank, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRangeWithScoresAsync(setId, fromRank, toRank, token)); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetDescAsync(string setId, int fromRank, int toRank, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRevRangeWithScoresAsync(setId, fromRank, toRank, token)); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByLowestScoreAsync(setId, fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetRangeFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, skip, take, token); + } + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => NativeAsync.ZRangeByScoreAsync(setId, fromScore, toScore, skip, take, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByLowestScoreAsync(string setId, long fromScore, long toScore, int? skip, int? 
take, CancellationToken token) + => NativeAsync.ZRangeByScoreAsync(setId, fromScore, toScore, skip, take, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByLowestScoreAsync(setId, fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetRangeWithScoresFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, skip, take, token); + } + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRangeByScoreWithScoresAsync(setId, fromScore, toScore, skip, take, token)); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByLowestScoreAsync(string setId, long fromScore, long toScore, int? skip, int? take, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRangeByScoreWithScoresAsync(setId, fromScore, toScore, skip, take, token)); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByHighestScoreAsync(setId, fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetRangeFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, skip, take, token); + } + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => AsAsync().GetRangeFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => NativeAsync.ZRevRangeByScoreAsync(setId, fromScore, toScore, skip, take, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeFromSortedSetByHighestScoreAsync(string setId, long fromScore, long toScore, int? skip, int? 
take, CancellationToken token) + => NativeAsync.ZRevRangeByScoreAsync(setId, fromScore, toScore, skip, take, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByHighestScoreAsync(setId, fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + { + var fromScore = GetLexicalScore(fromStringScore); + var toScore = GetLexicalScore(toStringScore); + return AsAsync().GetRangeWithScoresFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, skip, take, token); + } + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => AsAsync().GetRangeWithScoresFromSortedSetByHighestScoreAsync(setId, fromScore, toScore, null, null, token); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, double fromScore, double toScore, int? skip, int? take, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRevRangeByScoreWithScoresAsync(setId, fromScore, toScore, skip, take, token)); + + ValueTask> IRedisClientAsync.GetRangeWithScoresFromSortedSetByHighestScoreAsync(string setId, long fromScore, long toScore, int? skip, int? 
take, CancellationToken token) + => CreateSortedScoreMapAsync(NativeAsync.ZRevRangeByScoreWithScoresAsync(setId, fromScore, toScore, skip, take, token)); + + ValueTask IRedisClientAsync.RemoveRangeFromSortedSetAsync(string setId, int minRank, int maxRank, CancellationToken token) + => NativeAsync.ZRemRangeByRankAsync(setId, minRank, maxRank, token); + + ValueTask IRedisClientAsync.RemoveRangeFromSortedSetByScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + => NativeAsync.ZRemRangeByScoreAsync(setId, fromScore, toScore, token); + + ValueTask IRedisClientAsync.RemoveRangeFromSortedSetByScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + => NativeAsync.ZRemRangeByScoreAsync(setId, fromScore, toScore, token); + + ValueTask IRedisClientAsync.StoreIntersectFromSortedSetsAsync(string intoSetId, string[] setIds, CancellationToken token) + => NativeAsync.ZInterStoreAsync(intoSetId, setIds, token); + + ValueTask IRedisClientAsync.StoreIntersectFromSortedSetsAsync(string intoSetId, string[] setIds, string[] args, CancellationToken token) + => base.ZInterStoreAsync(intoSetId, setIds, args, token); + + ValueTask IRedisClientAsync.StoreUnionFromSortedSetsAsync(string intoSetId, string[] setIds, CancellationToken token) + => NativeAsync.ZUnionStoreAsync(intoSetId, setIds, token); + + ValueTask IRedisClientAsync.StoreUnionFromSortedSetsAsync(string intoSetId, string[] setIds, string[] args, CancellationToken token) + => base.ZUnionStoreAsync(intoSetId, setIds, args, token); + + ValueTask IRedisClientAsync.HashContainsEntryAsync(string hashId, string key, CancellationToken token) + => NativeAsync.HExistsAsync(hashId, key.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.SetEntryInHashIfNotExistsAsync(string hashId, string key, string value, CancellationToken token) + => NativeAsync.HSetNXAsync(hashId, key.ToUtf8Bytes(), value.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask IRedisClientAsync.SetRangeInHashAsync(string hashId, IEnumerable> keyValuePairs, CancellationToken token) + => SetRangeInHashPrepare(keyValuePairs, out var keys, out var values) ? 
NativeAsync.HMSetAsync(hashId, keys, values, token) : default; + + ValueTask IRedisClientAsync.IncrementValueInHashAsync(string hashId, string key, int incrementBy, CancellationToken token) + => NativeAsync.HIncrbyAsync(hashId, key.ToUtf8Bytes(), incrementBy, token); + + ValueTask IRedisClientAsync.IncrementValueInHashAsync(string hashId, string key, double incrementBy, CancellationToken token) + => NativeAsync.HIncrbyFloatAsync(hashId, key.ToUtf8Bytes(), incrementBy, token); + + ValueTask IRedisClientAsync.GetValueFromHashAsync(string hashId, string key, CancellationToken token) + => NativeAsync.HGetAsync(hashId, key.ToUtf8Bytes(), token).FromUtf8BytesAsync(); + + ValueTask> IRedisClientAsync.GetValuesFromHashAsync(string hashId, string[] keys, CancellationToken token) + { + if (keys.Length == 0) return new List().AsValueTaskResult(); + var keyBytes = ConvertToBytes(keys); + return NativeAsync.HMGetAsync(hashId, keyBytes, token).ToStringListAsync(); + } + + ValueTask IRedisClientAsync.RemoveEntryFromHashAsync(string hashId, string key, CancellationToken token) + => NativeAsync.HDelAsync(hashId, key.ToUtf8Bytes(), token).IsSuccessAsync(); + + ValueTask> IRedisClientAsync.GetHashKeysAsync(string hashId, CancellationToken token) + => NativeAsync.HKeysAsync(hashId, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetHashValuesAsync(string hashId, CancellationToken token) + => NativeAsync.HValsAsync(hashId, token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.GetAllEntriesFromHashAsync(string hashId, CancellationToken token) + => NativeAsync.HGetAllAsync(hashId, token).Await(ret => ret.ToStringDictionary()); + + ValueTask IRedisClientAsync.ExecLuaAsync(string body, string[] args, CancellationToken token) + => NativeAsync.EvalCommandAsync(body, 0, args.ToMultiByteArray(), token).Await(ret => ret.ToRedisText()); + + ValueTask IRedisClientAsync.ExecLuaShaAsync(string sha1, string[] args, CancellationToken token) + => NativeAsync.EvalShaCommandAsync(sha1, 0, args.ToMultiByteArray(), token).Await(ret => ret.ToRedisText()); + + ValueTask IRedisClientAsync.ExecLuaAsStringAsync(string body, string[] args, CancellationToken token) + => NativeAsync.EvalStrAsync(body, 0, args.ToMultiByteArray(), token); + + ValueTask IRedisClientAsync.ExecLuaShaAsStringAsync(string sha1, string[] args, CancellationToken token) + => NativeAsync.EvalShaStrAsync(sha1, 0, args.ToMultiByteArray(), token); + + ValueTask IRedisClientAsync.ExecLuaAsIntAsync(string body, string[] args, CancellationToken token) + => NativeAsync.EvalIntAsync(body, 0, args.ToMultiByteArray(), token); + + ValueTask IRedisClientAsync.ExecLuaAsIntAsync(string body, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalIntAsync(body, keys.Length, MergeAndConvertToBytes(keys, args), token); + + ValueTask IRedisClientAsync.ExecLuaShaAsIntAsync(string sha1, string[] args, CancellationToken token) + => NativeAsync.EvalShaIntAsync(sha1, 0, args.ToMultiByteArray(), token); + + ValueTask IRedisClientAsync.ExecLuaShaAsIntAsync(string sha1, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalShaIntAsync(sha1, keys.Length, MergeAndConvertToBytes(keys, args), token); + + ValueTask> IRedisClientAsync.ExecLuaAsListAsync(string body, string[] args, CancellationToken token) + => NativeAsync.EvalAsync(body, 0, args.ToMultiByteArray(), token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.ExecLuaAsListAsync(string body, string[] keys, string[] args, CancellationToken token) + => 
NativeAsync.EvalAsync(body, keys.Length, MergeAndConvertToBytes(keys, args), token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.ExecLuaShaAsListAsync(string sha1, string[] args, CancellationToken token) + => NativeAsync.EvalShaAsync(sha1, 0, args.ToMultiByteArray(), token).ToStringListAsync(); + + ValueTask> IRedisClientAsync.ExecLuaShaAsListAsync(string sha1, string[] keys, string[] args, CancellationToken token) + => NativeAsync.EvalShaAsync(sha1, keys.Length, MergeAndConvertToBytes(keys, args), token).ToStringListAsync(); + + ValueTask IRedisClientAsync.CalculateSha1Async(string luaBody, CancellationToken token) + => CalculateSha1(luaBody).AsValueTaskResult(); + + async ValueTask IRedisClientAsync.HasLuaScriptAsync(string sha1Ref, CancellationToken token) + { + var map = await AsAsync().WhichLuaScriptsExistsAsync(new[] { sha1Ref }, token).ConfigureAwait(false); + return map[sha1Ref]; + } + + async ValueTask> IRedisClientAsync.WhichLuaScriptsExistsAsync(string[] sha1Refs, CancellationToken token) + { + var intFlags = await NativeAsync.ScriptExistsAsync(sha1Refs.ToMultiByteArray()).ConfigureAwait(false); + return WhichLuaScriptsExistsParseResult(sha1Refs, intFlags); + } + + ValueTask IRedisClientAsync.RemoveAllLuaScriptsAsync(CancellationToken token) + => NativeAsync.ScriptFlushAsync(token); + + ValueTask IRedisClientAsync.KillRunningLuaScriptAsync(CancellationToken token) + => NativeAsync.ScriptKillAsync(token); + + ValueTask IRedisClientAsync.CustomAsync(params object[] cmdWithArgs) + => AsAsync().CustomAsync(cmdWithArgs, token: default); + + ValueTask IRedisClientAsync.RemoveEntryAsync(params string[] args) + => AsAsync().RemoveEntryAsync(args, token: default); + + ValueTask IRedisClientAsync.AddToHyperLogAsync(string key, params string[] elements) + => AsAsync().AddToHyperLogAsync(key, elements, token: default); + + ValueTask IRedisClientAsync.MergeHyperLogsAsync(string toKey, params string[] fromKeys) + => AsAsync().MergeHyperLogsAsync(toKey, fromKeys, token: default); + + ValueTask IRedisClientAsync.AddGeoMembersAsync(string key, params RedisGeo[] geoPoints) + => AsAsync().AddGeoMembersAsync(key, geoPoints, token: default); + + ValueTask IRedisClientAsync.GetGeohashesAsync(string key, params string[] members) + => AsAsync().GetGeohashesAsync(key, members, token: default); + + ValueTask> IRedisClientAsync.GetGeoCoordinatesAsync(string key, params string[] members) + => AsAsync().GetGeoCoordinatesAsync(key, members, token: default); + + ValueTask IRedisClientAsync.WatchAsync(params string[] keys) + => AsAsync().WatchAsync(keys, token: default); + + ValueTask> IRedisClientAsync.GetIntersectFromSetsAsync(params string[] setIds) + => AsAsync().GetIntersectFromSetsAsync(setIds, token: default); + + ValueTask IRedisClientAsync.StoreIntersectFromSetsAsync(string intoSetId, params string[] setIds) + => AsAsync().StoreIntersectFromSetsAsync(intoSetId, setIds, token: default); + + ValueTask> IRedisClientAsync.GetUnionFromSetsAsync(params string[] setIds) + => AsAsync().GetUnionFromSetsAsync(setIds, token: default); + + ValueTask IRedisClientAsync.StoreUnionFromSetsAsync(string intoSetId, params string[] setIds) + => AsAsync().StoreUnionFromSetsAsync(intoSetId, setIds, token: default); + + ValueTask> IRedisClientAsync.GetDifferencesFromSetAsync(string fromSetId, params string[] withSetIds) + => AsAsync().GetDifferencesFromSetAsync(fromSetId, withSetIds, token: default); + + ValueTask IRedisClientAsync.StoreDifferencesFromSetAsync(string intoSetId, string fromSetId, params string[] 
withSetIds) + => AsAsync().StoreDifferencesFromSetAsync(intoSetId, fromSetId, withSetIds, token: default); + + ValueTask IRedisClientAsync.StoreIntersectFromSortedSetsAsync(string intoSetId, params string[] setIds) + => AsAsync().StoreIntersectFromSortedSetsAsync(intoSetId, setIds, token: default); + + ValueTask IRedisClientAsync.StoreUnionFromSortedSetsAsync(string intoSetId, params string[] setIds) + => AsAsync().StoreUnionFromSortedSetsAsync(intoSetId, setIds, token: default); + + ValueTask> IRedisClientAsync.GetValuesFromHashAsync(string hashId, params string[] keys) + => AsAsync().GetValuesFromHashAsync(hashId, keys, token: default); + + ValueTask IRedisClientAsync.ExecLuaAsync(string body, params string[] args) + => AsAsync().ExecLuaAsync(body, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaShaAsync(string sha1, params string[] args) + => AsAsync().ExecLuaShaAsync(sha1, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaAsStringAsync(string luaBody, params string[] args) + => AsAsync().ExecLuaAsStringAsync(luaBody, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaShaAsStringAsync(string sha1, params string[] args) + => AsAsync().ExecLuaShaAsStringAsync(sha1, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaAsIntAsync(string luaBody, params string[] args) + => AsAsync().ExecLuaAsIntAsync(luaBody, args, token: default); + + ValueTask IRedisClientAsync.ExecLuaShaAsIntAsync(string sha1, params string[] args) + => AsAsync().ExecLuaShaAsIntAsync(sha1, args, token: default); + + ValueTask> IRedisClientAsync.ExecLuaAsListAsync(string luaBody, params string[] args) + => AsAsync().ExecLuaAsListAsync(luaBody, args, token: default); + + ValueTask> IRedisClientAsync.ExecLuaShaAsListAsync(string sha1, params string[] args) + => AsAsync().ExecLuaShaAsListAsync(sha1, args, token: default); + + ValueTask> IRedisClientAsync.WhichLuaScriptsExistsAsync(params string[] sha1Refs) + => AsAsync().WhichLuaScriptsExistsAsync(sha1Refs, token: default); + } +} diff --git a/src/ServiceStack.Redis/RedisClient.ICacheClient.cs b/src/ServiceStack.Redis/RedisClient.ICacheClient.cs index 8c8cde6a..21bd3372 100644 --- a/src/ServiceStack.Redis/RedisClient.ICacheClient.cs +++ b/src/ServiceStack.Redis/RedisClient.ICacheClient.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -20,11 +20,11 @@ namespace ServiceStack.Redis { public partial class RedisClient - : ICacheClient, IRemoveByPattern + : ICacheClient { public T Exec(Func action) { - using (JsConfig.With(excludeTypeInfo: false)) + using (JsConfig.With(new Text.Config { ExcludeTypeInfo = false })) { return action(this); } @@ -32,7 +32,7 @@ public T Exec(Func action) public void Exec(Action action) { - using (JsConfig.With(excludeTypeInfo: false)) + using (JsConfig.With(new Text.Config { ExcludeTypeInfo = false })) { action(this); } @@ -52,10 +52,25 @@ public T Get(string key) ); } + //Looking up Dictionary for type is faster than HashSet. 
+ private static readonly Dictionary numericTypes = new Dictionary { + { typeof(byte), true}, + { typeof(sbyte), true}, + { typeof(short), true}, + { typeof(ushort), true}, + { typeof(int), true}, + { typeof(uint), true}, + { typeof(long), true}, + { typeof(ulong), true}, + { typeof(double), true}, + { typeof(float), true}, + { typeof(decimal), true} + }; + private static byte[] ToBytes(T value) { var bytesValue = value as byte[]; - if (bytesValue == null && !Equals(value, default(T))) + if (bytesValue == null && (numericTypes.ContainsKey(typeof(T)) || !Equals(value, default(T)))) bytesValue = value.ToJson().ToUtf8Bytes(); return bytesValue; } @@ -157,60 +172,74 @@ public IDictionary GetAll(IEnumerable keys) { var keysArray = keys.ToArray(); var keyValues = r.MGet(keysArray); - var results = new Dictionary(); - var isBytes = typeof(T) == typeof(byte[]); - var i = 0; - foreach (var keyValue in keyValues) + return ProcessGetAllResult(keysArray, keyValues); + }); + } + + private static IDictionary ProcessGetAllResult(string[] keysArray, byte[][] keyValues) + { + var results = new Dictionary(); + var isBytes = typeof(T) == typeof(byte[]); + + var i = 0; + foreach (var keyValue in keyValues) + { + var key = keysArray[i++]; + + if (keyValue == null) { - var key = keysArray[i++]; - - if (keyValue == null) - { - results[key] = default(T); - continue; - } - - if (isBytes) - { - results[key] = (T)(object)keyValue; - } - else - { - var keyValueString = Encoding.UTF8.GetString(keyValue); - results[key] = JsonSerializer.DeserializeFromString(keyValueString); - } + results[key] = default(T); + continue; } - return results; - }); + + if (isBytes) + { + results[key] = (T)(object)keyValue; + } + else + { + var keyValueString = Encoding.UTF8.GetString(keyValue); + results[key] = JsonSerializer.DeserializeFromString(keyValueString); + } + } + return results; } public void SetAll(IDictionary values) { - Exec(r => + if (values.Count != 0) { - var keys = values.Keys.ToArray(); - var valBytes = new byte[values.Count][]; - var isBytes = typeof(T) == typeof(byte[]); + Exec(r => + { + // need to do this inside Exec for the JSON config bits + GetSetAllBytesTyped(values, out var keys, out var valBytes); + r.MSet(keys, valBytes); + }); + } + } - var i = 0; - foreach (var value in values.Values) + private static void GetSetAllBytesTyped(IDictionary values, out string[] keys, out byte[][] valBytes) + { + keys = values.Keys.ToArray(); + valBytes = new byte[values.Count][]; + var isBytes = typeof(T) == typeof(byte[]); + + var i = 0; + foreach (var value in values.Values) + { + if (!isBytes) { - if (!isBytes) - { - var t = JsonSerializer.SerializeToString(value); - if (t != null) - valBytes[i] = t.ToUtf8Bytes(); - else - valBytes[i] = new byte[] { }; - } + var t = JsonSerializer.SerializeToString(value); + if (t != null) + valBytes[i] = t.ToUtf8Bytes(); else - valBytes[i] = (byte[])(object)value ?? new byte[] { }; - i++; + valBytes[i] = new byte[] { }; } - - r.MSet(keys, valBytes); - }); + else + valBytes[i] = (byte[])(object)value ?? new byte[] { }; + i++; + } } } diff --git a/src/ServiceStack.Redis/RedisClient.cs b/src/ServiceStack.Redis/RedisClient.cs index 1f823e81..1908f322 100644 --- a/src/ServiceStack.Redis/RedisClient.cs +++ b/src/ServiceStack.Redis/RedisClient.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
// @@ -16,9 +16,11 @@ using System.Collections.Generic; using System.Linq; using System.Text; +using System.Threading; using ServiceStack.Redis.Generic; using ServiceStack.Redis.Pipeline; using ServiceStack.Text; +using ServiceStack.Caching; namespace ServiceStack.Redis { @@ -30,7 +32,7 @@ namespace ServiceStack.Redis /// RedisClient.Sets => ICollection[string] /// public partial class RedisClient - : RedisNativeClient, IRedisClient + : RedisNativeClient, IRedisClient, IRemoveByPattern // IRemoveByPattern is implemented in this file. { public RedisClient() { @@ -95,7 +97,7 @@ public void Init() public string this[string key] { get { return GetValue(key); } - set { SetEntry(key, value); } + set { SetValue(key, value); } } public override void OnConnected() { } @@ -119,31 +121,9 @@ public RedisText Custom(params object[] cmdWithArgs) public List GetAllKeys() => SearchKeys("*"); - [Obsolete("Use SetValue()")] - public void SetEntry(string key, string value) => SetValue(key, value); - [Obsolete("Use SetValue()")] - public void SetEntry(string key, string value, TimeSpan expireIn) => SetValue(key, value, expireIn); - [Obsolete("Use SetValueIfExists()")] - public bool SetEntryIfExists(string key, string value) => SetValueIfExists(key, value); - [Obsolete("Use SetValueIfNotExists()")] - public bool SetEntryIfNotExists(string key, string value) => SetValueIfNotExists(key, value); - [Obsolete("Use SetValueIfExists()")] - public bool SetEntryIfExists(string key, string value, TimeSpan expireIn) => SetValueIfExists(key, value, expireIn); - [Obsolete("Use SetValueIfNotExists()")] - public bool SetEntryIfNotExists(string key, string value, TimeSpan expireIn) => SetValueIfNotExists(key, value, expireIn); - [Obsolete("Use GetClientsInfo")] - public List> GetClientList() => GetClientsInfo(); - [Obsolete("Use GetValue()")] - public string GetEntry(string key) => GetValue(key); - [Obsolete("Use GetAndSetValue()")] - public string GetAndSetEntry(string key, string value) => GetAndSetValue(key, value); - public void SetValue(string key, string value) { - var bytesValue = value != null - ? value.ToUtf8Bytes() - : null; - + var bytesValue = value?.ToUtf8Bytes(); base.Set(key, bytesValue); } @@ -163,9 +143,7 @@ public bool SetValue(byte[] key, byte[] value, TimeSpan expireIn) public void SetValue(string key, string value, TimeSpan expireIn) { - var bytesValue = value != null - ? value.ToUtf8Bytes() - : null; + var bytesValue = value?.ToUtf8Bytes(); if (AssertServerVersionNumber() >= 2610) { @@ -182,21 +160,19 @@ public void SetValue(string key, string value, TimeSpan expireIn) public bool SetValueIfExists(string key, string value) { - var bytesValue = value != null ? value.ToUtf8Bytes() : null; - + var bytesValue = value?.ToUtf8Bytes(); return base.Set(key, bytesValue, exists: true); } public bool SetValueIfNotExists(string key, string value) { - var bytesValue = value != null ? value.ToUtf8Bytes() : null; - + var bytesValue = value?.ToUtf8Bytes(); return base.Set(key, bytesValue, exists: false); } public bool SetValueIfExists(string key, string value, TimeSpan expireIn) { - var bytesValue = value != null ? value.ToUtf8Bytes() : null; + var bytesValue = value?.ToUtf8Bytes(); if (expireIn.Milliseconds > 0) return base.Set(key, bytesValue, exists: true, expiryMs: (long)expireIn.TotalMilliseconds); @@ -206,7 +182,7 @@ public bool SetValueIfExists(string key, string value, TimeSpan expireIn) public bool SetValueIfNotExists(string key, string value, TimeSpan expireIn) { - var bytesValue = value != null ? 
value.ToUtf8Bytes() : null; + var bytesValue = value?.ToUtf8Bytes(); if (expireIn.Milliseconds > 0) return base.Set(key, bytesValue, exists: false, expiryMs: (long)expireIn.TotalMilliseconds); @@ -221,32 +197,54 @@ public void SetValues(Dictionary map) public void SetAll(IEnumerable keys, IEnumerable values) { - if (keys == null || values == null) return; + if (GetSetAllBytes(keys, values, out var keyBytes, out var valBytes)) + { + base.MSet(keyBytes, valBytes); + } + } + + bool GetSetAllBytes(IEnumerable keys, IEnumerable values, out byte[][] keyBytes, out byte[][] valBytes) + { + keyBytes = valBytes = default; + if (keys == null || values == null) return false; var keyArray = keys.ToArray(); var valueArray = values.ToArray(); if (keyArray.Length != valueArray.Length) throw new Exception("Key length != Value Length. {0}/{1}".Fmt(keyArray.Length, valueArray.Length)); - if (keyArray.Length == 0) return; + if (keyArray.Length == 0) return false; - var keyBytes = new byte[keyArray.Length][]; - var valBytes = new byte[keyArray.Length][]; + keyBytes = new byte[keyArray.Length][]; + valBytes = new byte[keyArray.Length][]; for (int i = 0; i < keyArray.Length; i++) { keyBytes[i] = keyArray[i].ToUtf8Bytes(); valBytes[i] = valueArray[i].ToUtf8Bytes(); } - base.MSet(keyBytes, valBytes); + return true; } public void SetAll(Dictionary map) { - if (map == null || map.Count == 0) return; + if (GetSetAllBytes(map, out var keyBytes, out var valBytes)) + { + base.MSet(keyBytes, valBytes); + } + } + + private static bool GetSetAllBytes(IDictionary map, out byte[][] keyBytes, out byte[][] valBytes) + { + if (map == null || map.Count == 0) + { + keyBytes = null; + valBytes = null; + return false; + } - var keyBytes = new byte[map.Count][]; - var valBytes = new byte[map.Count][]; + keyBytes = new byte[map.Count][]; + valBytes = new byte[map.Count][]; var i = 0; foreach (var key in map.Keys) @@ -256,16 +254,13 @@ public void SetAll(Dictionary map) valBytes[i] = val.ToUtf8Bytes(); i++; } - - base.MSet(keyBytes, valBytes); + return true; } public string GetValue(string key) { var bytes = Get(key); - return bytes == null - ? null - : bytes.FromUtf8Bytes(); + return bytes?.FromUtf8Bytes(); } public string GetAndSetValue(string key, string value) @@ -347,25 +342,23 @@ public string GetRandomKey() public bool ExpireEntryIn(string key, TimeSpan expireIn) { - if (AssertServerVersionNumber() >= 2600) + if (UseMillisecondExpiration(expireIn)) { - if (expireIn.Milliseconds > 0) - { - return PExpire(key, (long)expireIn.TotalMilliseconds); - } + return PExpire(key, (long)expireIn.TotalMilliseconds); } return Expire(key, (int)expireIn.TotalSeconds); } + private bool UseMillisecondExpiration(TimeSpan value) + + => AssertServerVersionNumber() >= 2600 && value.Milliseconds > 0; + public bool ExpireEntryIn(byte[] key, TimeSpan expireIn) { - if (AssertServerVersionNumber() >= 2600) + if (UseMillisecondExpiration(expireIn)) { - if (expireIn.Milliseconds > 0) - { - return PExpire(key, (long)expireIn.TotalMilliseconds); - } + return PExpire(key, (long)expireIn.TotalMilliseconds); } return Expire(key, (int)expireIn.TotalSeconds); @@ -384,8 +377,10 @@ public bool ExpireEntryAt(string key, DateTime expireAt) } public TimeSpan? GetTimeToLive(string key) + => ParseTimeToLiveResult(Ttl(key)); + + private static TimeSpan? 
ParseTimeToLiveResult(long ttlSecs) { - var ttlSecs = Ttl(key); if (ttlSecs == -1) return TimeSpan.MaxValue; //no expiry set @@ -395,6 +390,11 @@ public bool ExpireEntryAt(string key, DateTime expireAt) return TimeSpan.FromSeconds(ttlSecs); } + public void RemoveExpiredEntries() + { + //Redis automatically removes expired Cache Entries + } + public IRedisTypedClient As() { try @@ -422,12 +422,12 @@ public IDisposable AcquireLock(string key, TimeSpan timeOut) public IRedisTransaction CreateTransaction() { AssertServerVersionNumber(); // pre-fetch call to INFO before transaction if needed - return new RedisTransaction(this); + return new RedisTransaction(this, false); } public void AssertNotInTransaction() { - if (Transaction != null) + if (Transaction != null || Pipeline != null) throw new NotSupportedException("Only atomic redis-server operations are supported in a transaction"); } @@ -438,18 +438,20 @@ public IRedisPipeline CreatePipeline() public List SearchKeys(string pattern) { - var multiDataList = Keys(pattern); - return multiDataList.ToStringList(); + var multiDataList = ScanAllKeys(pattern); + return multiDataList.ToList(); } public List GetValues(List keys) { - if (keys == null) throw new ArgumentNullException("keys"); + if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Count == 0) return new List(); - var resultBytesArray = MGet(keys.ToArray()); - - var results = new List(); + return ParseGetValuesResult(MGet(keys.ToArray())); + } + private static List ParseGetValuesResult(byte[][] resultBytesArray) + { + var results = new List(resultBytesArray.Length); foreach (var resultBytes in resultBytesArray) { if (resultBytes == null) continue; @@ -463,12 +465,15 @@ public List GetValues(List keys) public List GetValues(List keys) { - if (keys == null) throw new ArgumentNullException("keys"); + if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Count == 0) return new List(); - var resultBytesArray = MGet(keys.ToArray()); + return ParseGetValuesResult(MGet(keys.ToArray())); + } - var results = new List(); + private static List ParseGetValuesResult(byte[][] resultBytesArray) + { + var results = new List(resultBytesArray.Length); foreach (var resultBytes in resultBytesArray) { if (resultBytes == null) continue; @@ -483,12 +488,17 @@ public List GetValues(List keys) public Dictionary GetValuesMap(List keys) { - if (keys == null) throw new ArgumentNullException("keys"); + if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Count == 0) return new Dictionary(); var keysArray = keys.ToArray(); var resultBytesArray = MGet(keysArray); + return ParseGetValuesMapResult(keysArray, resultBytesArray); + } + + private static Dictionary ParseGetValuesMapResult(string[] keysArray, byte[][] resultBytesArray) + { var results = new Dictionary(); for (var i = 0; i < resultBytesArray.Length; i++) { @@ -511,12 +521,17 @@ public Dictionary GetValuesMap(List keys) { - if (keys == null) throw new ArgumentNullException("keys"); + if (keys == null) throw new ArgumentNullException(nameof(keys)); if (keys.Count == 0) return new Dictionary(); var keysArray = keys.ToArray(); var resultBytesArray = MGet(keysArray); + return ParseGetValuesMapResult(keysArray, resultBytesArray); + } + + private static Dictionary ParseGetValuesMapResult(string[] keysArray, byte[][] resultBytesArray) + { var results = new Dictionary(); for (var i = 0; i < resultBytesArray.Length; i++) { @@ -538,7 +553,7 @@ public Dictionary 
GetValuesMap(List keys) return results; } - public IRedisSubscription CreateSubscription() + public override IRedisSubscription CreateSubscription() { return new RedisSubscription(this); } @@ -550,13 +565,11 @@ public long PublishMessage(string toChannel, string message) #region IBasicPersistenceProvider - Dictionary> registeredTypeIdsWithinPipelineMap = new Dictionary>(); internal HashSet GetRegisteredTypeIdsWithinPipeline(string typeIdsSet) { - HashSet registeredTypeIdsWithinPipeline; - if (!registeredTypeIdsWithinPipelineMap.TryGetValue(typeIdsSet, out registeredTypeIdsWithinPipeline)) + if (!registeredTypeIdsWithinPipelineMap.TryGetValue(typeIdsSet, out var registeredTypeIdsWithinPipeline)) { registeredTypeIdsWithinPipeline = new HashSet(); registeredTypeIdsWithinPipelineMap[typeIdsSet] = registeredTypeIdsWithinPipeline; @@ -601,7 +614,16 @@ internal void RegisterTypeIds(IEnumerable values) } } - internal void RemoveTypeIds(params string[] ids) + internal void RemoveTypeIdsById(string id) + { + var typeIdsSetKey = GetTypeIdsSetKey(); + if (this.Pipeline != null) + GetRegisteredTypeIdsWithinPipeline(typeIdsSetKey).Remove(id); + else + this.RemoveItemFromSet(typeIdsSetKey, id); + } + + internal void RemoveTypeIdsByIds(IEnumerable ids) { var typeIdsSetKey = GetTypeIdsSetKey(); if (this.Pipeline != null) @@ -615,7 +637,9 @@ internal void RemoveTypeIds(params string[] ids) } } - internal void RemoveTypeIds(params T[] values) + internal void RemoveTypeIdsByValue(T value) => RemoveTypeIdsById(value.GetId().ToString()); + + internal void RemoveTypeIdsByValues(IEnumerable values) { var typeIdsSetKey = GetTypeIdsSetKey(); if (this.Pipeline != null) @@ -674,7 +698,7 @@ public T Store(T entity) var urnKey = UrnKey(entity); var valueString = JsonSerializer.SerializeToString(entity); - this.SetEntry(urnKey, valueString); + this.SetValue(urnKey, valueString); RegisterTypeId(entity); return entity; @@ -682,14 +706,14 @@ public T Store(T entity) public object StoreObject(object entity) { - if (entity == null) throw new ArgumentNullException("entity"); + if (entity == null) throw new ArgumentNullException(nameof(entity)); var id = entity.GetObjectId(); var entityType = entity.GetType(); var urnKey = UrnKey(entityType, id); var valueString = JsonSerializer.SerializeToString(entity); - this.SetEntry(urnKey, valueString); + this.SetValue(urnKey, valueString); RegisterTypeId(GetTypeIdsSetKey(entityType), id.ToString()); @@ -722,42 +746,69 @@ public void StoreAsHash(T entity) //Without the Generic Constraints internal void _StoreAll(IEnumerable entities) { - if (entities == null) return; + if (PrepareStoreAll(entities, out var keys, out var values, out var entitiesList)) + { + base.MSet(keys, values); + RegisterTypeIds(entitiesList); + } + } - var entitiesList = entities.ToList(); + private bool PrepareStoreAll(IEnumerable entities, out byte[][] keys, out byte[][] values, out List entitiesList) + { + if (entities == null) + { + entitiesList = default; + keys = values = default; + return false; + } + + entitiesList = entities.ToList(); var len = entitiesList.Count; - if (len == 0) return; + if (len == 0) + { + keys = values = default; + return false; + } - var keys = new byte[len][]; - var values = new byte[len][]; + keys = new byte[len][]; + values = new byte[len][]; for (var i = 0; i < len; i++) { keys[i] = UrnKey(entitiesList[i]).ToUtf8Bytes(); values[i] = SerializeToUtf8Bytes(entitiesList[i]); } - - base.MSet(keys, values); - RegisterTypeIds(entitiesList); + return true; } public void WriteAll(IEnumerable 
entities) { - if (entities == null) return; + if (PrepareWriteAll(entities, out var keys, out var values)) + { + base.MSet(keys, values); + } + } + + private bool PrepareWriteAll(IEnumerable entities, out byte[][] keys, out byte[][] values) + { + if (entities == null) + { + keys = values = default; + return false; + } var entitiesList = entities.ToList(); var len = entitiesList.Count; - var keys = new byte[len][]; - var values = new byte[len][]; + keys = new byte[len][]; + values = new byte[len][]; for (var i = 0; i < len; i++) { keys[i] = UrnKey(entitiesList[i]).ToUtf8Bytes(); values[i] = SerializeToUtf8Bytes(entitiesList[i]); } - - base.MSet(keys, values); + return true; } public static byte[] SerializeToUtf8Bytes(T value) @@ -769,14 +820,14 @@ public void Delete(T entity) { var urnKey = UrnKey(entity); this.Remove(urnKey); - this.RemoveTypeIds(entity); + this.RemoveTypeIdsByValue(entity); } public void DeleteById(object id) { var urnKey = UrnKey(id); this.Remove(urnKey); - this.RemoveTypeIds(id.ToString()); + this.RemoveTypeIdsById(id.ToString()); } public void DeleteByIds(ICollection ids) @@ -784,32 +835,38 @@ public void DeleteByIds(ICollection ids) if (ids == null || ids.Count == 0) return; var idsList = ids.Cast(); - var urnKeys = idsList.Map(UrnKey); - this.RemoveEntry(urnKeys.ToArray()); - this.RemoveTypeIds(idsList.Map(x => x.ToString()).ToArray()); + var urnKeys = idsList.Select(UrnKey).ToArray(); + this.RemoveEntry(urnKeys); + this.RemoveTypeIdsByIds(ids.Map(x => x.ToString()).ToArray()); } public void DeleteAll() { - var typeIdsSetKey = this.GetTypeIdsSetKey(); - var ids = this.GetAllItemsFromSet(typeIdsSetKey); - if (ids.Count > 0) - { - var urnKeys = ids.ToList().ConvertAll(UrnKey); - this.RemoveEntry(urnKeys.ToArray()); - this.Remove(typeIdsSetKey); - } + DeleteAll(0,RedisConfig.CommandKeysBatchSize); } - public RedisClient CloneClient() + private void DeleteAll(ulong cursor, int batchSize) { - return new RedisClient(Host, Port, Password, Db) + var typeIdsSetKey = this.GetTypeIdsSetKey(); + do { - SendTimeout = SendTimeout, - ReceiveTimeout = ReceiveTimeout - }; + var scanResult = this.SScan(typeIdsSetKey, cursor, batchSize); + cursor = scanResult.Cursor; + var urnKeys = scanResult.Results.Select(id => UrnKey(id.FromUtf8Bytes())).ToArray(); + if (urnKeys.Length > 0) + { + this.RemoveEntry(urnKeys); + } + } while (cursor != 0); + + this.RemoveEntry(typeIdsSetKey); } + public RedisClient CloneClient() => new(Host, Port, Password, Db) { + SendTimeout = SendTimeout, + ReceiveTimeout = ReceiveTimeout + }; + /// /// Returns key with automatic object id detection in provided value with generic type. 
/// @@ -817,7 +874,7 @@ public RedisClient CloneClient() /// public string UrnKey(T value) { - return String.Concat(NamespacePrefix, value.CreateUrn()); + return string.Concat(NamespacePrefix, value.CreateUrn()); } /// @@ -827,7 +884,7 @@ public string UrnKey(T value) /// public string UrnKey(object id) { - return String.Concat(NamespacePrefix, IdUtils.CreateUrn(id)); + return string.Concat(NamespacePrefix, IdUtils.CreateUrn(id)); } /// @@ -838,7 +895,7 @@ public string UrnKey(object id) /// public string UrnKey(Type type, object id) { - return String.Concat(NamespacePrefix, IdUtils.CreateUrn(type, id)); + return string.Concat(NamespacePrefix, IdUtils.CreateUrn(type, id)); } @@ -846,13 +903,11 @@ public string UrnKey(Type type, object id) #region LUA EVAL - static readonly ConcurrentDictionary CachedLuaSha1Map = - new ConcurrentDictionary(); + static readonly ConcurrentDictionary CachedLuaSha1Map = new(); public T ExecCachedLua(string scriptBody, Func scriptSha1) { - string sha1; - if (!CachedLuaSha1Map.TryGetValue(scriptBody, out sha1)) + if (!CachedLuaSha1Map.TryGetValue(scriptBody, out var sha1)) CachedLuaSha1Map[scriptBody] = sha1 = LoadLuaScript(scriptBody); try @@ -905,7 +960,7 @@ public long ExecLuaAsInt(string luaBody, string[] keys, string[] args) public long ExecLuaShaAsInt(string sha1, params string[] args) { - return base.EvalShaInt(sha1, args.Length, args.ToMultiByteArray()); + return base.EvalShaInt(sha1, 0, args.ToMultiByteArray()); } public long ExecLuaShaAsInt(string sha1, string[] keys, string[] args) @@ -962,6 +1017,10 @@ public bool HasLuaScript(string sha1Ref) public Dictionary WhichLuaScriptsExists(params string[] sha1Refs) { var intFlags = base.ScriptExists(sha1Refs.ToMultiByteArray()); + return WhichLuaScriptsExistsParseResult(sha1Refs, intFlags); + } + static Dictionary WhichLuaScriptsExistsParseResult(string[] sha1Refs, byte[][] intFlags) + { var map = new Dictionary(); for (int i = 0; i < sha1Refs.Length; i++) { @@ -990,15 +1049,18 @@ public string LoadLuaScript(string body) public void RemoveByPattern(string pattern) { - List keys = Keys(pattern).ToStringList(); - if (keys.Count > 0) - Del(keys.ToArray()); + var keys = ScanAllKeys(pattern).ToArray(); + if (keys.Length > 0) + Del(keys); } public void RemoveByRegex(string pattern) { - RemoveByPattern(pattern.Replace(".*", "*").Replace(".+", "?")); + RemoveByPattern(RegexToGlob(pattern)); } + + private static string RegexToGlob(string regex) + => regex.Replace(".*", "*").Replace(".+", "?"); public IEnumerable ScanAllKeys(string pattern = null, int pageSize = 1000) { @@ -1096,8 +1158,7 @@ public RedisServerRole GetServerRole() return ToServerRole(roleName); } - string role; - this.Info.TryGetValue("role", out role); + this.Info.TryGetValue("role", out var role); return ToServerRole(role); } @@ -1118,6 +1179,30 @@ private static RedisServerRole ToServerRole(string roleName) return RedisServerRole.Unknown; } } + + internal RedisClient LimitAccessToThread(int originalThreadId, string originalStackTrace) + { + TrackThread = new TrackThread(originalThreadId, originalStackTrace); + return this; + } + } + + internal struct TrackThread + { + public readonly int ThreadId; + public readonly string StackTrace; + + public TrackThread(int threadId, string stackTrace) + { + ThreadId = threadId; + StackTrace = stackTrace; + } + } + + public class InvalidAccessException : RedisException + { + public InvalidAccessException(int threadId, string stackTrace) + : base($"The Current Thread #{Thread.CurrentThread.ManagedThreadId} is 
different to the original Thread #{threadId} that resolved this pooled client at: \n{stackTrace}") { } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientExtensions.cs b/src/ServiceStack.Redis/RedisClientExtensions.cs index 5f7ef45b..79e655ed 100644 --- a/src/ServiceStack.Redis/RedisClientExtensions.cs +++ b/src/ServiceStack.Redis/RedisClientExtensions.cs @@ -1,6 +1,6 @@ namespace ServiceStack.Redis { - public static class RedisClientExtensions + public static partial class RedisClientExtensions { public static string GetHostString(this IRedisClient redis) { diff --git a/src/ServiceStack.Redis/RedisClientHash.Async.cs b/src/ServiceStack.Redis/RedisClientHash.Async.cs new file mode 100644 index 00000000..c1bab49f --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientHash.Async.cs @@ -0,0 +1,55 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal partial class RedisClientHash + : IRedisHashAsync + { + private IRedisClientAsync AsyncClient => client; + + ValueTask IRedisHashAsync.AddAsync(KeyValuePair item, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(hashId, item.Key, item.Value, token).Await(); + + ValueTask IRedisHashAsync.AddAsync(string key, string value, CancellationToken token) + => AsyncClient.SetEntryInHashAsync(hashId, key, value, token).Await(); + + ValueTask IRedisHashAsync.AddIfNotExistsAsync(KeyValuePair item, CancellationToken token) + => AsyncClient.SetEntryInHashIfNotExistsAsync(hashId, item.Key, item.Value, token); + + ValueTask IRedisHashAsync.AddRangeAsync(IEnumerable> items, CancellationToken token) + => AsyncClient.SetRangeInHashAsync(hashId, items, token); + + ValueTask IRedisHashAsync.ClearAsync(CancellationToken token) + => new ValueTask(AsyncClient.RemoveAsync(hashId, token)); + + ValueTask IRedisHashAsync.ContainsKeyAsync(string key, CancellationToken token) + => AsyncClient.HashContainsEntryAsync(hashId, key, token); + + ValueTask IRedisHashAsync.CountAsync(CancellationToken token) + => AsyncClient.GetHashCountAsync(hashId, token).AsInt32(); + + IAsyncEnumerator> IAsyncEnumerable>.GetAsyncEnumerator(CancellationToken token) + => AsyncClient.ScanAllHashEntriesAsync(hashId).GetAsyncEnumerator(token); // note: we're using HSCAN here, not HGETALL + + ValueTask IRedisHashAsync.IncrementValueAsync(string key, int incrementBy, CancellationToken token) + => AsyncClient.IncrementValueInHashAsync(hashId, key, incrementBy, token); + + ValueTask IRedisHashAsync.RemoveAsync(string key, CancellationToken token) + => AsyncClient.RemoveEntryFromHashAsync(hashId, key, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientHash.cs b/src/ServiceStack.Redis/RedisClientHash.cs index dd9efd76..5aad5911 100644 --- a/src/ServiceStack.Redis/RedisClientHash.cs +++ b/src/ServiceStack.Redis/RedisClientHash.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
// @@ -20,7 +20,7 @@ namespace ServiceStack.Redis /// /// Wrap the common redis set operations under a ICollection[string] interface. /// - internal class RedisClientHash + internal partial class RedisClientHash : IRedisHash { private readonly RedisClient client; diff --git a/src/ServiceStack.Redis/RedisClientList.Async.cs b/src/ServiceStack.Redis/RedisClientList.Async.cs new file mode 100644 index 00000000..a995081b --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientList.Async.cs @@ -0,0 +1,161 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal partial class RedisClientList + : IRedisListAsync + { + private IRedisClientAsync AsyncClient => client; + private IRedisListAsync AsAsync() => this; + + ValueTask IRedisListAsync.AppendAsync(string value, CancellationToken token) + => AsyncClient.AddItemToListAsync(listId, value, token); + + ValueTask IRedisListAsync.BlockingDequeueAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingDequeueItemFromListAsync(listId, timeOut, token); + + ValueTask IRedisListAsync.BlockingPopAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingPopItemFromListAsync(listId, timeOut, token); + + ValueTask IRedisListAsync.BlockingRemoveStartAsync(TimeSpan? timeOut, CancellationToken token) + => AsyncClient.BlockingRemoveStartFromListAsync(listId, timeOut, token); + + ValueTask IRedisListAsync.CountAsync(CancellationToken token) + => AsyncClient.GetListCountAsync(listId, token).AsInt32(); + + ValueTask IRedisListAsync.DequeueAsync(CancellationToken token) + => AsyncClient.DequeueItemFromListAsync(listId, token); + + ValueTask IRedisListAsync.EnqueueAsync(string value, CancellationToken token) + => AsyncClient.EnqueueItemOnListAsync(listId, value, token); + + ValueTask> IRedisListAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromListAsync(listId, token); + + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + var count = await AsAsync().CountAsync(token).ConfigureAwait(false); + if (count <= PageLimit) + { + var all = await AsyncClient.GetAllItemsFromListAsync(listId, token).ConfigureAwait(false); + foreach (var item in all) + { + yield return item; + } + } + else + { + // from GetPagingEnumerator() + var skip = 0; + List pageResults; + do + { + pageResults = await AsyncClient.GetRangeFromListAsync(listId, skip, skip + PageLimit - 1, token).ConfigureAwait(false); + foreach (var result in pageResults) + { + yield return result; + } + skip += PageLimit; + } while (pageResults.Count == PageLimit); + } + } + + ValueTask> IRedisListAsync.GetRangeAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetRangeFromListAsync(listId, startingFrom, endingAt, token); + + ValueTask> IRedisListAsync.GetRangeFromSortedListAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetRangeFromSortedListAsync(listId, startingFrom, endingAt, token); + + ValueTask IRedisListAsync.PopAndPushAsync(IRedisListAsync toList, CancellationToken token) + => 
AsyncClient.PopAndPushItemBetweenListsAsync(listId, toList.Id, token); + + ValueTask IRedisListAsync.PopAsync(CancellationToken token) + => AsyncClient.PopItemFromListAsync(listId, token); + + ValueTask IRedisListAsync.PrependAsync(string value, CancellationToken token) + => AsyncClient.PrependItemToListAsync(listId, value, token); + + ValueTask IRedisListAsync.PushAsync(string value, CancellationToken token) + => AsyncClient.PushItemToListAsync(listId, value, token); + + ValueTask IRedisListAsync.RemoveAllAsync(CancellationToken token) + => AsyncClient.RemoveAllFromListAsync(listId, token); + + ValueTask IRedisListAsync.RemoveEndAsync(CancellationToken token) + => AsyncClient.RemoveEndFromListAsync(listId, token); + + ValueTask IRedisListAsync.RemoveStartAsync(CancellationToken token) + => AsyncClient.RemoveStartFromListAsync(listId, token); + + ValueTask IRedisListAsync.RemoveValueAsync(string value, CancellationToken token) + => AsyncClient.RemoveItemFromListAsync(listId, value, token); + + ValueTask IRedisListAsync.RemoveValueAsync(string value, int noOfMatches, CancellationToken token) + => AsyncClient.RemoveItemFromListAsync(listId, value, noOfMatches, token); + + ValueTask IRedisListAsync.TrimAsync(int keepStartingFrom, int keepEndingAt, CancellationToken token) + => AsyncClient.TrimListAsync(listId, keepStartingFrom, keepEndingAt, token); + + async ValueTask IRedisListAsync.RemoveAsync(string value, CancellationToken token) + => (await AsyncClient.RemoveItemFromListAsync(listId, value, token).ConfigureAwait(false)) > 0; + + ValueTask IRedisListAsync.AddAsync(string value, CancellationToken token) + => AsyncClient.AddItemToListAsync(listId, value, token); + + async ValueTask IRedisListAsync.RemoveAtAsync(int index, CancellationToken token) + { + //TODO: replace with native implementation when one exists + var markForDelete = Guid.NewGuid().ToString(); + await AsyncClient.SetItemInListAsync(listId, index, markForDelete, token).ConfigureAwait(false); + await AsyncClient.RemoveItemFromListAsync(listId, markForDelete, token).ConfigureAwait(false); + } + + async ValueTask IRedisListAsync.ContainsAsync(string value, CancellationToken token) + { + //TODO: replace with native implementation when exists + await foreach (var existingItem in this.ConfigureAwait(false).WithCancellation(token)) + { + if (existingItem == value) return true; + } + return false; + } + + ValueTask IRedisListAsync.ClearAsync(CancellationToken token) + => AsyncClient.RemoveAllFromListAsync(listId, token); + + async ValueTask IRedisListAsync.IndexOfAsync(string value, CancellationToken token) + { + //TODO: replace with native implementation when exists + var i = 0; + await foreach (var existingItem in this.ConfigureAwait(false).WithCancellation(token)) + { + if (existingItem == value) return i; + i++; + } + return -1; + } + + ValueTask IRedisListAsync.ElementAtAsync(int index, CancellationToken token) + => AsyncClient.GetItemFromListAsync(listId, index, token); + + ValueTask IRedisListAsync.SetValueAsync(int index, string value, CancellationToken token) + => AsyncClient.SetItemInListAsync(listId, index, value, token); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientList.cs b/src/ServiceStack.Redis/RedisClientList.cs index 8d80b101..1777723e 100644 --- a/src/ServiceStack.Redis/RedisClientList.cs +++ b/src/ServiceStack.Redis/RedisClientList.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. 
+// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -19,7 +19,7 @@ namespace ServiceStack.Redis /// /// Wrap the common redis list operations under a IList[string] interface. /// - internal class RedisClientList + internal partial class RedisClientList : IRedisList { private readonly RedisClient client; diff --git a/src/ServiceStack.Redis/RedisClientManagerCacheClient.Async.cs b/src/ServiceStack.Redis/RedisClientManagerCacheClient.Async.cs new file mode 100644 index 00000000..f77928b3 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientManagerCacheClient.Async.cs @@ -0,0 +1,165 @@ +using ServiceStack.Caching; +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisClientManagerCacheClient : ICacheClientAsync, IRemoveByPatternAsync, IAsyncDisposable + { + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + private ValueTask GetClientAsync(in CancellationToken token) + { + AssertNotReadOnly(); + return redisManager.GetClientAsync(token); + } + + async Task ICacheClientAsync.GetAsync(string key, CancellationToken token) + { + await using var client = await redisManager.GetReadOnlyClientAsync(token).ConfigureAwait(false); + return await client.GetAsync(key, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.SetAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.FlushAllAsync(CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + await client.FlushAllAsync(token).ConfigureAwait(false); + } + + async Task> ICacheClientAsync.GetAllAsync(IEnumerable keys, CancellationToken token) + { + await using var client = await redisManager.GetReadOnlyClientAsync(token).ConfigureAwait(false); + return await client.GetAllAsync(keys, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.SetAllAsync(IDictionary values, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + await client.SetAllAsync(values, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.RemoveAsync(string key, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.RemoveAsync(key, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.GetTimeToLiveAsync(string key, CancellationToken token) + { + await using var client = await redisManager.GetReadOnlyClientAsync(token).ConfigureAwait(false); + return await client.GetTimeToLiveAsync(key, token).ConfigureAwait(false); + } + + async IAsyncEnumerable 
ICacheClientAsync.GetKeysByPatternAsync(string pattern, [EnumeratorCancellation] CancellationToken token) + { + await using var client = await redisManager.GetReadOnlyClientAsync(token).ConfigureAwait(false); + await foreach (var key in client.GetKeysByPatternAsync(pattern, token).ConfigureAwait(false).WithCancellation(token)) + { + yield return key; + } + } + + Task ICacheClientAsync.RemoveExpiredEntriesAsync(CancellationToken token) + { + //Redis automatically removes expired Cache Entries + return Task.CompletedTask; + } + + async Task IRemoveByPatternAsync.RemoveByPatternAsync(string pattern, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + if (client is IRemoveByPatternAsync redisClient) + { + await redisClient.RemoveByPatternAsync(pattern, token).ConfigureAwait(false); + } + } + + async Task IRemoveByPatternAsync.RemoveByRegexAsync(string regex, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + if (client is IRemoveByPatternAsync redisClient) + { + await redisClient.RemoveByRegexAsync(regex, token).ConfigureAwait(false); + } + } + + async Task ICacheClientAsync.RemoveAllAsync(IEnumerable keys, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + await client.RemoveAllAsync(keys, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.IncrementAsync(string key, uint amount, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.IncrementAsync(key, amount, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.DecrementAsync(string key, uint amount, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.DecrementAsync(key, amount, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, DateTime expiresAt, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, expiresAt, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.AddAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.AddAsync(key, value, expiresIn, token).ConfigureAwait(false); + } + + async Task ICacheClientAsync.ReplaceAsync(string key, T value, TimeSpan expiresIn, CancellationToken token) + { + await using var client = await GetClientAsync(token).ConfigureAwait(false); + return await client.ReplaceAsync(key, value, expiresIn, 
token).ConfigureAwait(false); + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientManagerCacheClient.cs b/src/ServiceStack.Redis/RedisClientManagerCacheClient.cs index cbeb898b..6557bb46 100644 --- a/src/ServiceStack.Redis/RedisClientManagerCacheClient.cs +++ b/src/ServiceStack.Redis/RedisClientManagerCacheClient.cs @@ -1,18 +1,19 @@ using System; using System.Collections.Generic; +using System.Linq; using ServiceStack.Caching; namespace ServiceStack.Redis { /// - /// For interoperabilty GetCacheClient() and GetReadOnlyCacheClient() + /// For interoperability GetCacheClient() and GetReadOnlyCacheClient() /// return an ICacheClient wrapper around the redis manager which has the affect of calling /// GetClient() for all write operations and GetReadOnlyClient() for the read ones. /// - /// This works well for master-slave replication scenarios where you have - /// 1 master that replicates to multiple read slaves. + /// This works well for master-replica replication scenarios where you have + /// 1 master that replicates to multiple read replicas. /// - public class RedisClientManagerCacheClient : ICacheClient, IRemoveByPattern, ICacheClientExtended + public partial class RedisClientManagerCacheClient : ICacheClient, IRemoveByPattern, ICacheClientExtended { private readonly IRedisClientsManager redisManager; @@ -30,18 +31,14 @@ public void Dispose() { } public T Get(string key) { - using (var client = redisManager.GetReadOnlyClient()) - { - return client.Get(key); - } + using var client = redisManager.GetReadOnlyClient(); + return client.Get(key); } public IDictionary GetAll(IEnumerable keys) { - using (var client = redisManager.GetReadOnlyClient()) - { - return client.GetAll(keys); - } + using var client = redisManager.GetReadOnlyClient(); + return client.GetAll(keys); } private void AssertNotReadOnly() @@ -58,162 +55,132 @@ public ICacheClient GetClient() public bool Remove(string key) { - using (var client = GetClient()) - { - return client.Remove(key); - } + using var client = GetClient(); + return client.Remove(key); } public void RemoveAll(IEnumerable keys) { - using (var client = GetClient()) - { - client.RemoveAll(keys); - } + using var client = GetClient(); + client.RemoveAll(keys); } public long Increment(string key, uint amount) { - using (var client = GetClient()) - { - return client.Increment(key, amount); - } + using var client = GetClient(); + return client.Increment(key, amount); } public long Decrement(string key, uint amount) { - using (var client = GetClient()) - { - return client.Decrement(key, amount); - } + using var client = GetClient(); + return client.Decrement(key, amount); } public bool Add(string key, T value) { - using (var client = GetClient()) - { - return client.Add(key, value); - } + using var client = GetClient(); + return client.Add(key, value); } public bool Set(string key, T value) { - using (var client = GetClient()) - { - return client.Set(key, value); - } + using var client = GetClient(); + return client.Set(key, value); } public bool Replace(string key, T value) { - using (var client = GetClient()) - { - return client.Replace(key, value); - } + using var client = GetClient(); + return client.Replace(key, value); } public bool Add(string key, T value, DateTime expiresAt) { - using (var client = GetClient()) - { - return client.Add(key, value, expiresAt); - } + using var client = GetClient(); + return client.Add(key, value, expiresAt); } public bool Set(string key, T value, DateTime expiresAt) { - using (var client = 
GetClient()) - { - return client.Set(key, value, expiresAt); - } + using var client = GetClient(); + return client.Set(key, value, expiresAt); } public bool Replace(string key, T value, DateTime expiresAt) { - using (var client = GetClient()) - { - return client.Replace(key, value, expiresAt); - } + using var client = GetClient(); + return client.Replace(key, value, expiresAt); } public bool Add(string key, T value, TimeSpan expiresIn) { - using (var client = GetClient()) - { - return client.Set(key, value, expiresIn); - } + using var client = GetClient(); + return client.Set(key, value, expiresIn); } public bool Set(string key, T value, TimeSpan expiresIn) { - using (var client = GetClient()) - { - return client.Set(key, value, expiresIn); - } + using var client = GetClient(); + return client.Set(key, value, expiresIn); } public bool Replace(string key, T value, TimeSpan expiresIn) { - using (var client = GetClient()) - { - return client.Replace(key, value, expiresIn); - } + using var client = GetClient(); + return client.Replace(key, value, expiresIn); } public void FlushAll() { - using (var client = GetClient()) - { - client.FlushAll(); - } + using var client = GetClient(); + client.FlushAll(); } public void SetAll(IDictionary values) { - using (var client = GetClient()) - { - client.SetAll(values); - } + using var client = GetClient(); + client.SetAll(values); } public void RemoveByPattern(string pattern) { - using (var client = GetClient()) + using var client = GetClient(); + if (client is IRemoveByPattern redisClient) { - var redisClient = client as RedisClient; - if (redisClient != null) - { - List keys = redisClient.Keys(pattern).ToStringList(); - if (keys.Count > 0) - redisClient.Del(keys.ToArray()); - } + redisClient.RemoveByPattern(pattern); } } public void RemoveByRegex(string pattern) { - RemoveByPattern(pattern.Replace(".*", "*").Replace(".+", "?")); + using var client = GetClient(); + if (client is IRemoveByPattern redisClient) + { + redisClient.RemoveByRegex(pattern); + } } public TimeSpan? GetTimeToLive(string key) { - using (var client = GetClient()) + using var client = GetClient(); + if (client is ICacheClientExtended redisClient) { - var redisClient = client as RedisClient; - if (redisClient != null) - { - return redisClient.GetTimeToLive(key); - } + return redisClient.GetTimeToLive(key); } + return null; } public IEnumerable GetKeysByPattern(string pattern) { - using (var client = (RedisClient)GetClient()) - { - return client.GetKeysByPattern(pattern); - } + using var client = (ICacheClientExtended)GetClient(); + return client.GetKeysByPattern(pattern).ToList(); + } + + public void RemoveExpiredEntries() + { + //Redis automatically removed expired Cache Entries } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientManagerConfig.cs b/src/ServiceStack.Redis/RedisClientManagerConfig.cs index 8ca2a21e..e3052805 100644 --- a/src/ServiceStack.Redis/RedisClientManagerConfig.cs +++ b/src/ServiceStack.Redis/RedisClientManagerConfig.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
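> Editor's note: the new `RedisClientManagerCacheClient.Async.cs` partial above implements `ICacheClientAsync` by resolving a pooled client per call (`GetClientAsync` for writes, `GetReadOnlyClientAsync` for reads) and disposing it with `await using`. A minimal consumption sketch follows; the host string and key names are placeholders, and it assumes the usual optional `CancellationToken` defaults on the async cache interface.

```csharp
using System;
using System.Threading.Tasks;
using ServiceStack.Caching;
using ServiceStack.Redis;

class CacheClientAsyncSketch
{
    static async Task Main()
    {
        // Assumption: a local Redis is reachable at localhost:6379
        using var manager = new RedisManagerPool("localhost:6379");
        ICacheClientAsync cache = await manager.GetCacheClientAsync();

        await cache.SetAsync("greeting", "hello", TimeSpan.FromMinutes(5));
        var value = await cache.GetAsync<string>("greeting"); // served via a read-only client internally
        Console.WriteLine(value);

        await cache.RemoveAsync("greeting");
    }
}
```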
// diff --git a/src/ServiceStack.Redis/RedisClientSet.Async.cs b/src/ServiceStack.Redis/RedisClientSet.Async.cs new file mode 100644 index 00000000..b8d2d55f --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientSet.Async.cs @@ -0,0 +1,118 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal partial class RedisClientSet + : IRedisSetAsync + { + private IRedisSetAsync AsAsync() => this; + private IRedisClientAsync AsyncClient => client; + + ValueTask IRedisSetAsync.AddAsync(string item, CancellationToken token) + => AsyncClient.AddItemToSetAsync(setId, item, token); + + ValueTask IRedisSetAsync.ClearAsync(CancellationToken token) + => new ValueTask(AsyncClient.RemoveAsync(setId, token)); + + ValueTask IRedisSetAsync.ContainsAsync(string item, CancellationToken token) + => AsyncClient.SetContainsItemAsync(setId, item, token); + + ValueTask IRedisSetAsync.CountAsync(CancellationToken token) + => AsyncClient.GetSetCountAsync(setId, token).AsInt32(); + + ValueTask> IRedisSetAsync.DiffAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); + return AsyncClient.GetDifferencesFromSetAsync(setId, withSetIds, token); + } + + ValueTask> IRedisSetAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSetAsync(setId, token); + + IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + => AsyncClient.ScanAllSetItemsAsync(setId, token: token).GetAsyncEnumerator(token); // uses SSCAN + + ValueTask IRedisSetAsync.GetRandomEntryAsync(CancellationToken token) + => AsyncClient.GetRandomItemFromSetAsync(setId, token); + + ValueTask> IRedisSetAsync.GetRangeFromSortedSetAsync(int startingFrom, int endingAt, CancellationToken token) + => AsyncClient.GetSortedEntryValuesAsync(setId, startingFrom, endingAt, token); + + ValueTask> IRedisSetAsync.IntersectAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var allSetIds = MergeSetIds(withSets); + return AsyncClient.GetIntersectFromSetsAsync(allSetIds.ToArray(), token); + } + + ValueTask> IRedisSetAsync.IntersectAsync(params IRedisSetAsync[] withSets) + => AsAsync().IntersectAsync(withSets, token: default); + + private List MergeSetIds(IRedisSetAsync[] withSets) + { + var allSetIds = new List { setId }; + allSetIds.AddRange(withSets.ToList().ConvertAll(x => x.Id)); + return allSetIds; + } + + ValueTask IRedisSetAsync.MoveAsync(string value, IRedisSetAsync toSet, CancellationToken token) + => AsyncClient.MoveBetweenSetsAsync(setId, toSet.Id, value, token); + + ValueTask IRedisSetAsync.PopAsync(CancellationToken token) + => AsyncClient.PopItemFromSetAsync(setId, token); + + ValueTask IRedisSetAsync.RemoveAsync(string item, CancellationToken token) + => AsyncClient.RemoveItemFromSetAsync(setId, item, token).AwaitAsTrue(); // see Remove for why true + + ValueTask IRedisSetAsync.StoreDiffAsync(IRedisSetAsync fromSet, IRedisSetAsync[] withSets, CancellationToken token) + { + var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); + return 
AsyncClient.StoreDifferencesFromSetAsync(setId, fromSet.Id, withSetIds, token); + } + + ValueTask IRedisSetAsync.StoreDiffAsync(IRedisSetAsync fromSet, params IRedisSetAsync[] withSets) + => AsAsync().StoreDiffAsync(fromSet, withSets, token: default); + + ValueTask IRedisSetAsync.StoreIntersectAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); + return AsyncClient.StoreIntersectFromSetsAsync(setId, withSetIds, token); + } + + ValueTask IRedisSetAsync.StoreIntersectAsync(params IRedisSetAsync[] withSets) + => AsAsync().StoreIntersectAsync(withSets, token: default); + + ValueTask IRedisSetAsync.StoreUnionAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var withSetIds = withSets.ToList().ConvertAll(x => x.Id).ToArray(); + return AsyncClient.StoreUnionFromSetsAsync(setId, withSetIds, token); + } + + ValueTask IRedisSetAsync.StoreUnionAsync(params IRedisSetAsync[] withSets) + => AsAsync().StoreUnionAsync(withSets, token: default); + + ValueTask> IRedisSetAsync.UnionAsync(IRedisSetAsync[] withSets, CancellationToken token) + { + var allSetIds = MergeSetIds(withSets); + return AsyncClient.GetUnionFromSetsAsync(allSetIds.ToArray(), token); + } + + ValueTask> IRedisSetAsync.UnionAsync(params IRedisSetAsync[] withSets) + => AsAsync().UnionAsync(withSets, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientSet.cs b/src/ServiceStack.Redis/RedisClientSet.cs index dc96b566..daa58653 100644 --- a/src/ServiceStack.Redis/RedisClientSet.cs +++ b/src/ServiceStack.Redis/RedisClientSet.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -20,7 +20,7 @@ namespace ServiceStack.Redis /// /// Wrap the common redis set operations under a ICollection[string] interface. /// - internal class RedisClientSet + internal partial class RedisClientSet : IRedisSet { private readonly RedisClient client; diff --git a/src/ServiceStack.Redis/RedisClientSortedSet.Async.cs b/src/ServiceStack.Redis/RedisClientSortedSet.Async.cs new file mode 100644 index 00000000..a7afd56e --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientSortedSet.Async.cs @@ -0,0 +1,102 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. 
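> Editor's note: `RedisClientSet.Async.cs` above wires `IRedisSetAsync` onto the existing set wrapper by delegating to the async client calls (`AddItemToSetAsync`, `SetContainsItemAsync`, `ScanAllSetItemsAsync` for SSCAN-based enumeration, and so on). A hedged sketch of those underlying client calls, assuming a client obtained from a manager and optional token parameters; key and member values are made up.

```csharp
using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

class SetAsyncSketch
{
    static async Task Main()
    {
        using var manager = new RedisManagerPool("localhost:6379");
        await using var client = await manager.GetClientAsync();

        await client.AddItemToSetAsync("set:colours", "red");
        await client.AddItemToSetAsync("set:colours", "green");

        // Streams members back with SSCAN, the same call the wrapper's
        // IAsyncEnumerable implementation delegates to
        await foreach (var item in client.ScanAllSetItemsAsync("set:colours"))
            Console.WriteLine(item);
    }
}
```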
+// + +using ServiceStack.Redis.Internal; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + internal partial class RedisClientSortedSet + : IRedisSortedSetAsync + { + private IRedisClientAsync AsyncClient => client; + + ValueTask IRedisSortedSetAsync.AddAsync(string value, CancellationToken token) + => AsyncClient.AddItemToSortedSetAsync(setId, value, token).Await(); + + private IRedisSortedSetAsync AsAsync() => this; + + ValueTask IRedisSortedSetAsync.ClearAsync(CancellationToken token) + => new ValueTask(AsyncClient.RemoveAsync(setId, token)); + + ValueTask IRedisSortedSetAsync.ContainsAsync(string value, CancellationToken token) + => AsyncClient.SortedSetContainsItemAsync(setId, value, token); + + ValueTask IRedisSortedSetAsync.CountAsync(CancellationToken token) + => AsyncClient.GetSortedSetCountAsync(setId, token).AsInt32(); + + ValueTask> IRedisSortedSetAsync.GetAllAsync(CancellationToken token) + => AsyncClient.GetAllItemsFromSortedSetAsync(setId, token); + + async IAsyncEnumerator IAsyncEnumerable.GetAsyncEnumerator(CancellationToken token) + { + // uses ZSCAN + await foreach (var pair in AsyncClient.ScanAllSortedSetItemsAsync(setId, token: token).ConfigureAwait(false)) + { + yield return pair.Key; + } + } + + ValueTask IRedisSortedSetAsync.GetItemIndexAsync(string value, CancellationToken token) + => AsyncClient.GetItemIndexInSortedSetAsync(setId, value, token); + + ValueTask IRedisSortedSetAsync.GetItemScoreAsync(string value, CancellationToken token) + => AsyncClient.GetItemScoreInSortedSetAsync(setId, value, token); + + ValueTask> IRedisSortedSetAsync.GetRangeAsync(int startingRank, int endingRank, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetAsync(setId, startingRank, endingRank, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByScoreAsync(string fromStringScore, string toStringScore, CancellationToken token) + => AsAsync().GetRangeByScoreAsync(fromStringScore, toStringScore, null, null, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByScoreAsync(string fromStringScore, string toStringScore, int? skip, int? take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(setId, fromStringScore, toStringScore, skip, take, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsAsync().GetRangeByScoreAsync(fromScore, toScore, null, null, token); + + ValueTask> IRedisSortedSetAsync.GetRangeByScoreAsync(double fromScore, double toScore, int? skip, int? 
take, CancellationToken token) + => AsyncClient.GetRangeFromSortedSetByLowestScoreAsync(setId, fromScore, toScore, skip, take, token); + + ValueTask IRedisSortedSetAsync.IncrementItemScoreAsync(string value, double incrementByScore, CancellationToken token) + => AsyncClient.IncrementItemInSortedSetAsync(setId, value, incrementByScore, token).Await(); + + ValueTask IRedisSortedSetAsync.PopItemWithHighestScoreAsync(CancellationToken token) + => AsyncClient.PopItemWithHighestScoreFromSortedSetAsync(setId, token); + + ValueTask IRedisSortedSetAsync.PopItemWithLowestScoreAsync(CancellationToken token) + => AsyncClient.PopItemWithLowestScoreFromSortedSetAsync(setId, token); + + ValueTask IRedisSortedSetAsync.RemoveAsync(string value, CancellationToken token) + => AsyncClient.RemoveItemFromSortedSetAsync(setId, value, token).AwaitAsTrue(); // see Remove() for why "true" + + ValueTask IRedisSortedSetAsync.RemoveRangeAsync(int fromRank, int toRank, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetAsync(setId, fromRank, toRank, token).Await(); + + ValueTask IRedisSortedSetAsync.RemoveRangeByScoreAsync(double fromScore, double toScore, CancellationToken token) + => AsyncClient.RemoveRangeFromSortedSetByScoreAsync(setId, fromScore, toScore, token).Await(); + + ValueTask IRedisSortedSetAsync.StoreFromIntersectAsync(IRedisSortedSetAsync[] ofSets, CancellationToken token) + => AsyncClient.StoreIntersectFromSortedSetsAsync(setId, ofSets.GetIds(), token).Await(); + + ValueTask IRedisSortedSetAsync.StoreFromIntersectAsync(params IRedisSortedSetAsync[] ofSets) + => AsAsync().StoreFromIntersectAsync(ofSets, token: default); + + ValueTask IRedisSortedSetAsync.StoreFromUnionAsync(IRedisSortedSetAsync[] ofSets, CancellationToken token) + => AsyncClient.StoreUnionFromSortedSetsAsync(setId, ofSets.GetIds(), token).Await(); + + ValueTask IRedisSortedSetAsync.StoreFromUnionAsync(params IRedisSortedSetAsync[] ofSets) + => AsAsync().StoreFromUnionAsync(ofSets, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientSortedSet.cs b/src/ServiceStack.Redis/RedisClientSortedSet.cs index 78086aec..adde5d12 100644 --- a/src/ServiceStack.Redis/RedisClientSortedSet.cs +++ b/src/ServiceStack.Redis/RedisClientSortedSet.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -20,7 +20,7 @@ namespace ServiceStack.Redis /// /// Wrap the common redis set operations under a ICollection[string] interface. /// - internal class RedisClientSortedSet + internal partial class RedisClientSortedSet : IRedisSortedSet { private readonly RedisClient client; diff --git a/src/ServiceStack.Redis/RedisClient_Admin.cs b/src/ServiceStack.Redis/RedisClient_Admin.cs index 0dc7e3f1..804df5f1 100644 --- a/src/ServiceStack.Redis/RedisClient_Admin.cs +++ b/src/ServiceStack.Redis/RedisClient_Admin.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
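> Editor's note: `RedisClientSortedSet.Async.cs` follows the same shape for sorted sets, delegating to calls such as `AddItemToSortedSetAsync`, `GetRangeFromSortedSetAsync` and `ScanAllSortedSetItemsAsync` (ZSCAN). A small leaderboard-style sketch of those delegated calls; it assumes the score-taking overload mirrors the sync `AddItemToSortedSet`, and the key/scores are illustrative only.

```csharp
using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

class SortedSetAsyncSketch
{
    static async Task Main()
    {
        using var manager = new RedisManagerPool("localhost:6379");
        await using var client = await manager.GetClientAsync();

        await client.AddItemToSortedSetAsync("zset:scores", "alice", 42);
        await client.AddItemToSortedSetAsync("zset:scores", "bob", 7);

        // Rank range 0..-1 returns every member, ordered by score (lowest first)
        var ordered = await client.GetRangeFromSortedSetAsync("zset:scores", 0, -1);
        foreach (var member in ordered)
            Console.WriteLine(member);
    }
}
```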
// @@ -32,8 +32,13 @@ public RedisText GetServerRoleInfo() public string GetConfig(string configItem) { - var sb = StringBuilderCache.Allocate(); var byteArray = base.ConfigGet(configItem); + return GetConfigParse(byteArray); + } + + static string GetConfigParse(byte[][] byteArray) + { + var sb = StringBuilderCache.Allocate(); const int startAt = 1; //skip repeating config name for (var i = startAt; i < byteArray.Length; i++) { @@ -80,7 +85,11 @@ public long KillClients(string fromAddress = null, string withId = null, RedisCl public List> GetClientsInfo() { - var clientList = base.ClientList().FromUtf8Bytes(); + return GetClientsInfoParse(ClientList()); + } + private static List> GetClientsInfoParse(byte[] rawResult) + { + var clientList = rawResult.FromUtf8Bytes(); var results = new List>(); var lines = clientList.Split('\n'); @@ -108,6 +117,11 @@ public void PauseAllClients(TimeSpan duration) public DateTime GetServerTime() { var parts = base.Time(); + return ParseTimeResult(parts); + } + + private static DateTime ParseTimeResult(byte[][] parts) + { var unixTime = long.Parse(parts[0].FromUtf8Bytes()); var microSecs = long.Parse(parts[1].FromUtf8Bytes()); var ticks = microSecs / 1000 * TimeSpan.TicksPerMillisecond; diff --git a/src/ServiceStack.Redis/RedisClient_Hash.Async.cs b/src/ServiceStack.Redis/RedisClient_Hash.Async.cs new file mode 100644 index 00000000..9832b5d4 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient_Hash.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis +{ + public partial class RedisClient + { + internal partial class RedisClientHashes + : IHasNamed + { + IRedisHashAsync IHasNamed.this[string hashId] + { + get => new RedisClientHash(client, hashId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient_Hash.cs b/src/ServiceStack.Redis/RedisClient_Hash.cs index 66ea80fd..afc71b60 100644 --- a/src/ServiceStack.Redis/RedisClient_Hash.cs +++ b/src/ServiceStack.Redis/RedisClient_Hash.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
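> Editor's note: the `RedisClient_Admin.cs` hunk above shows a pattern repeated across this commit: a sync command body is split into the raw command call plus a private static parse helper (`GetConfigParse`, `GetClientsInfoParse`, `ParseTimeResult`) so the new `*.Async.cs` partials can reuse the same parsing. A purely hypothetical illustration of that shape follows; `Foo`, `FooRaw` and `ParseFoo` are not library APIs, just stand-ins for the refactoring idea.

```csharp
using System.Text;
using System.Threading.Tasks;

// Hypothetical names only; this illustrates the "raw call + shared static parser" split.
public partial class SomeClient
{
    public string Foo()                      // sync path: raw call, then shared parser
        => ParseFoo(FooRaw());

    public async Task<string> FooAsync()     // async path reuses the same parser
        => ParseFoo(await FooRawAsync().ConfigureAwait(false));

    static string ParseFoo(byte[] raw)       // pure and stateless, so trivially shareable
        => Encoding.UTF8.GetString(raw);

    byte[] FooRaw() => Encoding.UTF8.GetBytes("PONG");        // stand-in for the raw command
    Task<byte[]> FooRawAsync() => Task.FromResult(FooRaw());  // stand-in for its async twin
}
```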
// @@ -23,7 +23,7 @@ public partial class RedisClient { public IHasNamed Hashes { get; set; } - internal class RedisClientHashes + internal partial class RedisClientHashes : IHasNamed { private readonly RedisClient client; @@ -59,12 +59,23 @@ public bool SetEntryInHashIfNotExists(string hashId, string key, string value) } public void SetRangeInHash(string hashId, IEnumerable> keyValuePairs) + { + if (SetRangeInHashPrepare(keyValuePairs, out var keys, out var values)) + { + base.HMSet(hashId, keys, values); + } + } + bool SetRangeInHashPrepare(IEnumerable> keyValuePairs, out byte[][] keys, out byte[][] values) { var keyValuePairsList = keyValuePairs.ToList(); - if (keyValuePairsList.Count == 0) return; + if (keyValuePairsList.Count == 0) + { + keys = values = default; + return false; + } - var keys = new byte[keyValuePairsList.Count][]; - var values = new byte[keyValuePairsList.Count][]; + keys = new byte[keyValuePairsList.Count][]; + values = new byte[keyValuePairsList.Count][]; for (var i = 0; i < keyValuePairsList.Count; i++) { @@ -72,8 +83,7 @@ public void SetRangeInHash(string hashId, IEnumerable + { + IRedisListAsync IHasNamed.this[string listId] + { + get => new RedisClientList(client, listId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient_List.cs b/src/ServiceStack.Redis/RedisClient_List.cs index 77f7a0fd..d723b975 100644 --- a/src/ServiceStack.Redis/RedisClient_List.cs +++ b/src/ServiceStack.Redis/RedisClient_List.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
// @@ -14,6 +14,7 @@ using System.Collections.Generic; using System.Linq; using ServiceStack.Model; +using ServiceStack.Redis.Pipeline; using ServiceStack.Text; namespace ServiceStack.Redis @@ -26,7 +27,7 @@ public partial class RedisClient public IHasNamed Lists { get; set; } - internal class RedisClientLists + internal partial class RedisClientLists : IHasNamed { private readonly RedisClient client; @@ -81,6 +82,15 @@ public void AddItemToList(string listId, string value) } public void AddRangeToList(string listId, List values) + { + var pipeline = AddRangeToListPrepareNonFlushed(listId, values); + pipeline.Flush(); + + //the number of items after + var intResults = pipeline.ReadAllAsInts(); + } + + private RedisPipelineCommand AddRangeToListPrepareNonFlushed(string listId, List values) { var uListId = listId.ToUtf8Bytes(); @@ -89,10 +99,7 @@ public void AddRangeToList(string listId, List values) { pipeline.WriteCommand(Commands.RPush, uListId, value.ToUtf8Bytes()); } - pipeline.Flush(); - - //the number of items after - var intResults = pipeline.ReadAllAsInts(); + return pipeline; } public void PrependItemToList(string listId, string value) @@ -101,6 +108,15 @@ public void PrependItemToList(string listId, string value) } public void PrependRangeToList(string listId, List values) + { + var pipeline = PrependRangeToListPrepareNonFlushed(listId, values); + pipeline.Flush(); + + //the number of items after + var intResults = pipeline.ReadAllAsInts(); + } + + private RedisPipelineCommand PrependRangeToListPrepareNonFlushed(string listId, List values) { var uListId = listId.ToUtf8Bytes(); @@ -111,10 +127,7 @@ public void PrependRangeToList(string listId, List values) var value = values[i]; pipeline.WriteCommand(Commands.LPush, uListId, value.ToUtf8Bytes()); } - pipeline.Flush(); - - //the number of items after - var intResults = pipeline.ReadAllAsInts(); + return pipeline; } public void RemoveAllFromList(string listId) diff --git a/src/ServiceStack.Redis/RedisClient_Set.Async.cs b/src/ServiceStack.Redis/RedisClient_Set.Async.cs new file mode 100644 index 00000000..3e6f3b21 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient_Set.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis +{ + public partial class RedisClient + { + internal partial class RedisClientSets + : IHasNamed + { + IRedisSetAsync IHasNamed.this[string setId] + { + get => new RedisClientSet(client, setId); + set => throw new NotSupportedException(); + } + } + } +} diff --git a/src/ServiceStack.Redis/RedisClient_Set.cs b/src/ServiceStack.Redis/RedisClient_Set.cs index 8b18f169..c99a00c4 100644 --- a/src/ServiceStack.Redis/RedisClient_Set.cs +++ b/src/ServiceStack.Redis/RedisClient_Set.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
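> Editor's note: `AddRangeToList`/`PrependRangeToList` above are reworked so that building the pipelined RPUSH/LPUSH commands (the `*PrepareNonFlushed` helpers) is separated from flushing and reading the replies, again so the async partial can share the preparation step. The public API is unchanged for callers; a quick sketch of the bulk add via the sync API, with placeholder key and values.

```csharp
using System;
using System.Collections.Generic;
using ServiceStack.Redis;

class ListBatchSketch
{
    static void Main()
    {
        using var redis = new RedisClient("localhost", 6379);

        // Sends the RPUSH commands as one pipelined batch rather than N round-trips
        redis.AddRangeToList("queue:emails", new List<string> { "a@x", "b@x", "c@x" });

        foreach (var item in redis.GetAllItemsFromList("queue:emails"))
            Console.WriteLine(item);
    }
}
```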
// @@ -13,6 +13,7 @@ using System; using System.Collections.Generic; using System.Linq; +using System.Threading; using ServiceStack.Common; using ServiceStack.Model; using ServiceStack.Redis.Generic; @@ -26,7 +27,7 @@ public partial class RedisClient { public IHasNamed Sets { get; set; } - internal class RedisClientSets + internal partial class RedisClientSets : IHasNamed { private readonly RedisClient client; @@ -96,6 +97,11 @@ public List GetGeoCoordinates(string key, params string[] members) public string[] FindGeoMembersInRadius(string key, double longitude, double latitude, double radius, string unit) { var results = base.GeoRadius(key, longitude, latitude, radius, unit); + return ParseFindGeoMembersResult(results); + } + + private static string[] ParseFindGeoMembersResult(List results) + { var to = new string[results.Count]; for (var i = 0; i < results.Count; i++) { @@ -113,12 +119,7 @@ public List FindGeoResultsInRadius(string key, double longitude, public string[] FindGeoMembersInRadius(string key, string member, double radius, string unit) { var results = base.GeoRadiusByMember(key, member, radius, unit); - var to = new string[results.Count]; - for (var i = 0; i < results.Count; i++) - { - to[i] = results[i].Member; - } - return to; + return ParseFindGeoMembersResult(results); } public List FindGeoResultsInRadius(string key, string member, double radius, string unit, int? count = null, bool? sortByNearest = null) @@ -138,13 +139,30 @@ public void AddItemToSet(string setId, string item) } public void AddRangeToSet(string setId, List items) + { + if (AddRangeToSetNeedsSend(setId, items)) + { + var uSetId = setId.ToUtf8Bytes(); + var pipeline = CreatePipelineCommand(); + foreach (var item in items) + { + pipeline.WriteCommand(Commands.SAdd, uSetId, item.ToUtf8Bytes()); + } + pipeline.Flush(); + + //the number of items after + _ = pipeline.ReadAllAsInts(); + } + } + + bool AddRangeToSetNeedsSend(string setId, List items) { if (setId.IsNullOrEmpty()) throw new ArgumentNullException("setId"); if (items == null) throw new ArgumentNullException("items"); if (items.Count == 0) - return; + return false; if (this.Transaction != null || this.Pipeline != null) { @@ -163,19 +181,11 @@ public void AddRangeToSet(string setId, List items) var item = items[i]; queueable.QueueCommand(c => c.AddItemToSet(setId, item)); } + return false; } else { - var uSetId = setId.ToUtf8Bytes(); - var pipeline = CreatePipelineCommand(); - foreach (var item in items) - { - pipeline.WriteCommand(Commands.SAdd, uSetId, item.ToUtf8Bytes()); - } - pipeline.Flush(); - - //the number of items after - var intResults = pipeline.ReadAllAsInts(); + return true; } } diff --git a/src/ServiceStack.Redis/RedisClient_Slowlog.cs b/src/ServiceStack.Redis/RedisClient_Slowlog.cs index eb36f3b9..ecde5244 100644 --- a/src/ServiceStack.Redis/RedisClient_Slowlog.cs +++ b/src/ServiceStack.Redis/RedisClient_Slowlog.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -24,6 +24,11 @@ public partial class RedisClient public IEnumerable GetSlowlog(int? numberOfRecords = null) { var data = Slowlog(numberOfRecords); + return ParseSlowlog(data); + } + + private static SlowlogItem[] ParseSlowlog(object[] data) + { var list = new SlowlogItem[data.Length]; for (int i = 0; i < data.Length; i++) { @@ -47,20 +52,4 @@ public IEnumerable GetSlowlog(int? 
numberOfRecords = null) } - - public class SlowlogItem - { - public SlowlogItem(int id, DateTime timeStamp, int duration, string[] arguments) - { - Id = id; - Timestamp = timeStamp; - Duration = duration; - Arguments = arguments; - } - - public int Id { get; private set; } - public int Duration { get; private set; } - public DateTime Timestamp { get; private set; } - public string[] Arguments { get; private set; } - } } diff --git a/src/ServiceStack.Redis/RedisClient_SortedSet.Async.cs b/src/ServiceStack.Redis/RedisClient_SortedSet.Async.cs new file mode 100644 index 00000000..b3a89ff3 --- /dev/null +++ b/src/ServiceStack.Redis/RedisClient_SortedSet.Async.cs @@ -0,0 +1,30 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using ServiceStack.Model; +using System; + +namespace ServiceStack.Redis +{ + public partial class RedisClient : IRedisClient + { + internal partial class RedisClientSortedSets + : IHasNamed + { + IRedisSortedSetAsync IHasNamed.this[string setId] + { + get => new RedisClientSortedSet(client, setId); + set => throw new NotSupportedException(); + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClient_SortedSet.cs b/src/ServiceStack.Redis/RedisClient_SortedSet.cs index fd909c57..55549114 100644 --- a/src/ServiceStack.Redis/RedisClient_SortedSet.cs +++ b/src/ServiceStack.Redis/RedisClient_SortedSet.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. 
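> Editor's note: the `AddRangeToSet` rework earlier in this hunk (`AddRangeToSetNeedsSend`) keeps the existing behaviour: with a Transaction or Pipeline already open the individual SADDs are queued onto it, otherwise a dedicated pipeline is created and flushed. A sketch of both call sites, with made-up keys; treat the transactional comment as a summary of the branch above rather than a definitive statement of the API.

```csharp
using System.Collections.Generic;
using ServiceStack.Redis;

class SetBatchSketch
{
    static void Main()
    {
        using var redis = new RedisClient("localhost", 6379);

        // No transaction/pipeline open: AddRangeToSet pipelines the SADDs itself
        redis.AddRangeToSet("set:tags", new List<string> { "redis", "cache" });

        // With a transaction open, the same call queues the items on that
        // transaction instead of flushing its own pipeline (per the branch above)
        using var trans = redis.CreateTransaction();
        trans.QueueCommand(r => r.AddRangeToSet("set:tags", new List<string> { "nosql" }));
        trans.Commit();
    }
}
```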
// @@ -15,6 +15,7 @@ using System.Globalization; using System.Linq; using ServiceStack.Model; +using ServiceStack.Redis.Pipeline; using ServiceStack.Redis.Support; using ServiceStack.Text; @@ -24,7 +25,7 @@ public partial class RedisClient : IRedisClient { public IHasNamed SortedSets { get; set; } - internal class RedisClientSortedSets + internal partial class RedisClientSortedSets : IHasNamed { private readonly RedisClient client; @@ -87,36 +88,29 @@ public bool AddItemToSortedSet(string setId, string value, long score) public bool AddRangeToSortedSet(string setId, List values, double score) { - var pipeline = CreatePipelineCommand(); - var uSetId = setId.ToUtf8Bytes(); - var uScore = score.ToFastUtf8Bytes(); - - foreach (var value in values) - { - pipeline.WriteCommand(Commands.ZAdd, uSetId, uScore, value.ToUtf8Bytes()); - } - + var pipeline = AddRangeToSortedSetPrepareNonFlushed(setId, values, score.ToFastUtf8Bytes()); pipeline.Flush(); - var success = pipeline.ReadAllAsIntsHaveSuccess(); - return success; + return pipeline.ReadAllAsIntsHaveSuccess(); } public bool AddRangeToSortedSet(string setId, List values, long score) + { + var pipeline = AddRangeToSortedSetPrepareNonFlushed(setId, values, score.ToUtf8Bytes()); + pipeline.Flush(); + + return pipeline.ReadAllAsIntsHaveSuccess(); + } + RedisPipelineCommand AddRangeToSortedSetPrepareNonFlushed(string setId, List values, byte[] uScore) { var pipeline = CreatePipelineCommand(); var uSetId = setId.ToUtf8Bytes(); - var uScore = score.ToUtf8Bytes(); foreach (var value in values) { pipeline.WriteCommand(Commands.ZAdd, uSetId, uScore, value.ToUtf8Bytes()); } - - pipeline.Flush(); - - var success = pipeline.ReadAllAsIntsHaveSuccess(); - return success; + return pipeline; } public bool RemoveItemFromSortedSet(string setId, string value) diff --git a/src/ServiceStack.Redis/RedisClientsManagerExtensions.Async.cs b/src/ServiceStack.Redis/RedisClientsManagerExtensions.Async.cs new file mode 100644 index 00000000..123396ac --- /dev/null +++ b/src/ServiceStack.Redis/RedisClientsManagerExtensions.Async.cs @@ -0,0 +1,120 @@ +using ServiceStack.Caching; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Internal; +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + /// + /// Useful wrapper IRedisClientsManager to cut down the boiler plate of most IRedisClient access + /// + public static partial class RedisClientsManagerExtensions + { + ///// + ///// Creates a PubSubServer that uses a background thread to listen and process for + ///// Redis Pub/Sub messages published to the specified channel. + ///// Use optional callbacks to listen for message, error and life-cycle events. + ///// Callbacks can be assigned later, then call Start() for PubSubServer to start listening for messages + ///// + //public static IRedisPubSubServer CreatePubSubServer(this IRedisClientsManager redisManager, + // string channel, + // Action onMessage = null, + // Action onError = null, + // Action onInit = null, + // Action onStart = null, + // Action onStop = null) + //{ + // return new RedisPubSubServer(redisManager, channel) + // { + // OnMessage = onMessage, + // OnError = onError, + // OnInit = onInit, + // OnStart = onStart, + // OnStop = onStop, + // }; + //} + + private static T InvalidAsyncClient(IRedisClientsManager manager, string method) where T : class + => throw new NotSupportedException($"The client returned from '{manager?.GetType().FullName ?? 
"(null)"}.{method}()' does not implement {typeof(T).Name}"); + + public static ValueTask GetClientAsync(this IRedisClientsManager redisManager, CancellationToken token = default) + { + return redisManager is IRedisClientsManagerAsync asyncManager + ? asyncManager.GetClientAsync(token) + : (redisManager.GetClient() as IRedisClientAsync ?? InvalidAsyncClient(redisManager, nameof(redisManager.GetClient))).AsValueTaskResult(); + } + + public static ValueTask GetReadOnlyClientAsync(this IRedisClientsManager redisManager, CancellationToken token = default) + { + return redisManager is IRedisClientsManagerAsync asyncManager + ? asyncManager.GetReadOnlyClientAsync(token) + : (redisManager.GetReadOnlyClient() as IRedisClientAsync ?? InvalidAsyncClient(redisManager, nameof(redisManager.GetReadOnlyClient))).AsValueTaskResult(); + } + + public static ValueTask GetCacheClientAsync(this IRedisClientsManager redisManager, CancellationToken token = default) + { + return redisManager is IRedisClientsManagerAsync asyncManager + ? asyncManager.GetCacheClientAsync(token) + : (redisManager.GetCacheClient() as ICacheClientAsync ?? InvalidAsyncClient(redisManager, nameof(redisManager.GetCacheClient))).AsValueTaskResult(); + } + + public static ValueTask GetReadOnlyCacheClientAsync(this IRedisClientsManager redisManager, CancellationToken token = default) + { + return redisManager is IRedisClientsManagerAsync asyncManager + ? asyncManager.GetReadOnlyCacheClientAsync(token) + : (redisManager.GetReadOnlyCacheClient() as ICacheClientAsync ?? InvalidAsyncClient(redisManager, nameof(redisManager.GetCacheClient))).AsValueTaskResult(); + } + + + public static async ValueTask ExecAsync(this IRedisClientsManager redisManager, Func lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + await lambda(redis).ConfigureAwait(false); + } + + public static async ValueTask ExecAsync(this IRedisClientsManager redisManager, Func> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + return await lambda(redis).ConfigureAwait(false); + } + + //public static void ExecTrans(this IRedisClientsManager redisManager, Action lambda) + //{ + // using (var redis = redisManager.GetClient()) + // using (var trans = redis.CreateTransaction()) + // { + // lambda(trans); + + // trans.Commit(); + // } + //} + + public static async ValueTask ExecAsAsync(this IRedisClientsManager redisManager, Func, ValueTask> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + await lambda(redis.As()).ConfigureAwait(false); + } + + public static async ValueTask ExecAsAsync(this IRedisClientsManager redisManager, Func, ValueTask> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + return await lambda(redis.As()).ConfigureAwait(false); + } + + public static async ValueTask> ExecAsAsync(this IRedisClientsManager redisManager, Func, ValueTask>> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + return await lambda(redis.As()).ConfigureAwait(false); + } + + public static async ValueTask> ExecAsAsync(this IRedisClientsManager redisManager, Func, ValueTask>> lambda) + { + await using var redis = await redisManager.GetClientAsync().ConfigureAwait(false); + return await lambda(redis.As()).ConfigureAwait(false); + } + } + +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisClientsManagerExtensions.cs 
b/src/ServiceStack.Redis/RedisClientsManagerExtensions.cs index 9d4f8ce6..6f62b84b 100644 --- a/src/ServiceStack.Redis/RedisClientsManagerExtensions.cs +++ b/src/ServiceStack.Redis/RedisClientsManagerExtensions.cs @@ -7,7 +7,7 @@ namespace ServiceStack.Redis /// /// Useful wrapper IRedisClientsManager to cut down the boiler plate of most IRedisClient access /// - public static class RedisClientsManagerExtensions + public static partial class RedisClientsManagerExtensions { /// /// Creates a PubSubServer that uses a background thread to listen and process for diff --git a/src/ServiceStack.Redis/RedisConfig.cs b/src/ServiceStack.Redis/RedisConfig.cs index 6ba14bb5..da28ffb5 100644 --- a/src/ServiceStack.Redis/RedisConfig.cs +++ b/src/ServiceStack.Redis/RedisConfig.cs @@ -52,26 +52,42 @@ public class RedisConfig /// public static int? DefaultMaxPoolSize; + /// + /// The default pool size multiplier if no pool size is specified (default 50) + /// + public static int DefaultPoolSizeMultiplier = 50; + /// /// The BackOff multiplier failed Auto Retries starts from (default 10ms) /// public static int BackOffMultiplier = 10; /// - /// The Byte Buffer Size to combine Redis Operations within (default 1450 bytes) + /// The Byte Buffer Size to combine Redis Operations within (1450 bytes) /// - public static int BufferLength = 1450; + public static int BufferLength => ServiceStack.Text.Pools.BufferPool.BUFFER_LENGTH; /// /// The Byte Buffer Size for Operations to use a byte buffer pool (default 500kb) /// public static int BufferPoolMaxSize = 500000; + /// + /// Batch size of keys to include in a single Redis Command (e.g. DEL k1 k2...) + /// + public static int CommandKeysBatchSize = 10000; + /// /// Whether Connections to Master hosts should be verified they're still master instances (default true) /// public static bool VerifyMasterConnections = true; + /// + /// Whether to retry re-connecting on same connection if not a master instance (default true) + /// For Managed Services (e.g. AWS ElastiCache) which eventually restores master instances on same host + /// + public static bool RetryReconnectOnFailedMasters = true; + /// /// The ConnectTimeout on clients used to find the next available host (default 200ms) /// @@ -83,20 +99,33 @@ public class RedisConfig public static int? AssumeServerVersion; /// - /// How long to hold deactivated clients for before disposing their connection (default 1 min) + /// How long to hold deactivated clients for before disposing their connection (default 0 seconds) /// Dispose of deactivated Clients immediately with TimeSpan.Zero /// - public static TimeSpan DeactivatedClientsExpiry = TimeSpan.FromMinutes(1); + public static TimeSpan DeactivatedClientsExpiry = TimeSpan.Zero; /// /// Whether Debug Logging should log detailed Redis operations (default false) /// - public static bool DisableVerboseLogging = false; + public static bool EnableVerboseLogging = false; + + [Obsolete("Use EnableVerboseLogging")] + public static bool DisableVerboseLogging + { + get => !EnableVerboseLogging; + set => EnableVerboseLogging = !value; + } //Example at: http://msdn.microsoft.com/en-us/library/office/dd633677(v=exchg.80).aspx public static LocalCertificateSelectionCallback CertificateSelectionCallback { get; set; } public static RemoteCertificateValidationCallback CertificateValidationCallback { get; set; } + /// + /// Assert all access using pooled RedisClient instance should be limited to same thread. 
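> Editor's note: the new `RedisClientsManagerExtensions.Async.cs` helpers above (`ExecAsync`, `ExecAsAsync`) acquire a client from the manager, run the supplied lambda and dispose the client, mirroring the existing sync `Exec` helpers. A usage sketch follows, assuming the `SetValueAsync`/`GetValueAsync` calls referenced elsewhere in this commit; host and key names are placeholders.

```csharp
using System;
using System.Threading.Tasks;
using ServiceStack.Redis;

class ExecAsyncSketch
{
    static async Task Main()
    {
        using var manager = new RedisManagerPool("localhost:6379");

        // Client acquisition and disposal are handled inside the extension
        await manager.ExecAsync(async r => await r.SetValueAsync("hits", "1"));

        // Variant that returns the lambda's result
        var hits = await manager.ExecAsync(async r => await r.GetValueAsync("hits"));
        Console.WriteLine(hits);
    }
}
```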
+ /// Captures StackTrace so is very slow, use only for debugging connection issues. + /// + public static bool AssertAccessOnlyOnSameThread = false; + /// /// Resets Redis Config and Redis Stats back to default values /// @@ -111,15 +140,17 @@ public static void Reset() DefaultIdleTimeOutSecs = 240; DefaultMaxPoolSize = null; BackOffMultiplier = 10; - BufferLength = 1450; BufferPoolMaxSize = 500000; + CommandKeysBatchSize = 10000; VerifyMasterConnections = true; + RetryReconnectOnFailedMasters = true; HostLookupTimeoutMs = 200; AssumeServerVersion = null; - DeactivatedClientsExpiry = TimeSpan.FromMinutes(1); - DisableVerboseLogging = false; + DeactivatedClientsExpiry = TimeSpan.Zero; + EnableVerboseLogging = false; CertificateSelectionCallback = null; CertificateValidationCallback = null; + AssertAccessOnlyOnSameThread = false; } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisDataExtensions.cs b/src/ServiceStack.Redis/RedisDataExtensions.cs index d164601c..5ca8c2fd 100644 --- a/src/ServiceStack.Redis/RedisDataExtensions.cs +++ b/src/ServiceStack.Redis/RedisDataExtensions.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using System.Globalization; namespace ServiceStack.Redis { @@ -19,28 +20,28 @@ public static RedisText ToRedisText(this RedisData data) return to; } - public static string GetResult(this RedisText from) - { - return from.Text; - } + public static double ToDouble(this RedisData data) + => double.Parse(data.Data.FromUtf8Bytes(), + NumberStyles.Float, + CultureInfo.InvariantCulture); - public static T GetResult(this RedisText from) - { - return from.Text.FromJson(); - } + public static long ToInt64(this RedisData data) + => long.Parse(data.Data.FromUtf8Bytes(), + NumberStyles.Integer, + CultureInfo.InvariantCulture); + + public static string GetResult(this RedisText from) => from.Text; + + public static T GetResult(this RedisText from) => from.Text.FromJson(); public static List GetResults(this RedisText from) - { - return from.Children == null - ? new List() - : from.Children.ConvertAll(x => x.Text); - } + => from.Children == null + ? new List() + : from.Children.ConvertAll(x => x.Text); public static List GetResults(this RedisText from) - { - return from.Children == null - ? new List() - : from.Children.ConvertAll(x => x.Text.FromJson()); - } + => from.Children == null + ? new List() + : from.Children.ConvertAll(x => x.Text.FromJson()); } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisEndpoint.cs b/src/ServiceStack.Redis/RedisEndpoint.cs index 1310088e..b43da7da 100644 --- a/src/ServiceStack.Redis/RedisEndpoint.cs +++ b/src/ServiceStack.Redis/RedisEndpoint.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.ComponentModel; +using System.Security.Authentication; using System.Text; using ServiceStack.IO; using ServiceStack.Text; @@ -34,6 +35,7 @@ public RedisEndpoint(string host, int port, string password = null, long db = Re public string Host { get; set; } public int Port { get; set; } public bool Ssl { get; set; } + public SslProtocols? 
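> Editor's note: the `RedisConfig` changes above tighten several defaults (`DeactivatedClientsExpiry` is now `TimeSpan.Zero`, verbose logging is opt-in via `EnableVerboseLogging` with `DisableVerboseLogging` kept as an obsolete shim) and add new knobs such as `DefaultPoolSizeMultiplier`, `CommandKeysBatchSize`, `RetryReconnectOnFailedMasters` and `AssertAccessOnlyOnSameThread`. These are static settings; a sketch of setting them before constructing a manager, with illustrative values only.

```csharp
using System;
using ServiceStack.Redis;

class RedisConfigSketch
{
    static void Main()
    {
        // Static settings are picked up when managers/clients are constructed,
        // so configure them first (values shown are illustrative only)
        RedisConfig.RetryReconnectOnFailedMasters = true;     // new knob in this commit
        RedisConfig.DeactivatedClientsExpiry = TimeSpan.Zero; // new default: dispose immediately
        RedisConfig.EnableVerboseLogging = false;             // replaces DisableVerboseLogging (now [Obsolete])
        RedisConfig.AssertAccessOnlyOnSameThread = false;     // enable only while debugging; captures stack traces

        using var manager = new RedisManagerPool("localhost:6379");
    }
}
```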
SslProtocols {get; set;} public int ConnectTimeout { get; set; } public int SendTimeout { get; set; } public int ReceiveTimeout { get; set; } @@ -54,11 +56,13 @@ public override string ToString() if (Client != null) args.Add("Client=" + Client); if (Password != null) - args.Add("Password=" + Password); + args.Add("Password=" + Password.UrlEncode()); if (Db != RedisConfig.DefaultDb) args.Add("Db=" + Db); if (Ssl) args.Add("Ssl=true"); + if (SslProtocols != null) + args.Add("SslProtocols=" + SslProtocols.ToString()); if (ConnectTimeout != RedisConfig.DefaultConnectTimeout) args.Add("ConnectTimeout=" + ConnectTimeout); if (SendTimeout != RedisConfig.DefaultSendTimeout) @@ -83,6 +87,7 @@ protected bool Equals(RedisEndpoint other) return string.Equals(Host, other.Host) && Port == other.Port && Ssl.Equals(other.Ssl) + && SslProtocols.Equals(other.SslProtocols) && ConnectTimeout == other.ConnectTimeout && SendTimeout == other.SendTimeout && ReceiveTimeout == other.ReceiveTimeout @@ -109,6 +114,7 @@ public override int GetHashCode() var hashCode = (Host != null ? Host.GetHashCode() : 0); hashCode = (hashCode * 397) ^ Port; hashCode = (hashCode * 397) ^ Ssl.GetHashCode(); + hashCode = (hashCode * 397) ^ SslProtocols.GetHashCode(); hashCode = (hashCode * 397) ^ ConnectTimeout; hashCode = (hashCode * 397) ^ SendTimeout; hashCode = (hashCode * 397) ^ ReceiveTimeout; diff --git a/src/ServiceStack.Redis/RedisExtensions.cs b/src/ServiceStack.Redis/RedisExtensions.cs index f3193ebe..b4ba79f7 100644 --- a/src/ServiceStack.Redis/RedisExtensions.cs +++ b/src/ServiceStack.Redis/RedisExtensions.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -15,6 +15,7 @@ using System.Globalization; using System.Linq; using System.Net.Sockets; +using System.Security.Authentication; using ServiceStack.Model; using ServiceStack.Text; @@ -76,6 +77,12 @@ public static RedisEndpoint ToRedisEndpoint(this string connectionString, int? d if (useDefaultPort) endpoint.Port = RedisConfig.DefaultPortSsl; break; + case "sslprotocols": + SslProtocols protocols; + value = value?.Replace("|", ","); + if (!Enum.TryParse(value, true, out protocols)) throw new ArgumentOutOfRangeException("Keyword '" + name + "' requires an SslProtocol value (multiple values separated by '|')."); + endpoint.SslProtocols = protocols; + break; case "client": endpoint.Client = value; break; diff --git a/src/ServiceStack.Redis/RedisLock.Async.cs b/src/ServiceStack.Redis/RedisLock.Async.cs new file mode 100644 index 00000000..8466c11f --- /dev/null +++ b/src/ServiceStack.Redis/RedisLock.Async.cs @@ -0,0 +1,93 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Redis.Internal; +using ServiceStack.Text; + +namespace ServiceStack.Redis +{ + public partial class RedisLock + : IAsyncDisposable + { + internal static ValueTask CreateAsync(IRedisClientAsync redisClient, string key, + TimeSpan? timeOut = default, CancellationToken token = default) + { + var obj = new RedisLock(redisClient, key); + return obj.AcquireAsync(timeOut, token).Await(obj); + } + + // async version of ExecUtils.RetryUntilTrue + private static async ValueTask RetryUntilTrue(Func> action, + TimeSpan? 
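> Editor's note: `RedisEndpoint` and the connection-string parser above gain an `SslProtocols` option; the query-string value is parsed with `Enum.TryParse` after replacing `'|'` with `','`, so multiple protocols can be combined. A hedged sketch of the resulting connection-string shape; host, port and protocol value are placeholders.

```csharp
using ServiceStack.Redis;

class SslProtocolsSketch
{
    static void Main()
    {
        // Values map onto System.Security.Authentication.SslProtocols;
        // combine several with '|', e.g. sslprotocols=Tls11|Tls12
        var connectionString = "localhost:6380?ssl=true&sslprotocols=Tls12";

        using var manager = new RedisManagerPool(connectionString);
    }
}
```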
timeOut = null, CancellationToken token = default) + { + var i = 0; + var firstAttempt = DateTime.UtcNow; + + while (timeOut == null || DateTime.UtcNow - firstAttempt < timeOut.Value) + { + token.ThrowIfCancellationRequested(); + i++; + if (await action(token).ConfigureAwait(false)) + { + return; + } + await Task.Delay(ExecUtils.CalculateFullJitterBackOffDelay(i)).ConfigureAwait(false); + } + + throw new TimeoutException($"Exceeded timeout of {timeOut.Value}"); + } + + + private async ValueTask AcquireAsync(TimeSpan? timeOut, CancellationToken token) + { + var redisClient = (IRedisClientAsync)untypedClient; + await RetryUntilTrue( // .ConfigureAwait(false) is below + async ct => + { + //This pattern is taken from the redis command for SETNX http://redis.io/commands/setnx + + //Calculate a unix time for when the lock should expire + var realSpan = timeOut ?? new TimeSpan(365, 0, 0, 0); //if nothing is passed in the timeout hold for a year + var expireTime = DateTime.UtcNow.Add(realSpan); + var lockString = (expireTime.ToUnixTimeMs() + 1).ToString(); + + //Try to set the lock, if it does not exist this will succeed and the lock is obtained + var nx = await redisClient.SetValueIfNotExistsAsync(key, lockString, token: ct).ConfigureAwait(false); + if (nx) + return true; + + //If we've gotten here then a key for the lock is present. This could be because the lock is + //correctly acquired or it could be because a client that had acquired the lock crashed (or didn't release it properly). + //Therefore we need to get the value of the lock to see when it should expire + + await redisClient.WatchAsync(new[] { key }, ct).ConfigureAwait(false); + var lockExpireString = await redisClient.GetValueAsync(key, ct).ConfigureAwait(false); + if (!long.TryParse(lockExpireString, out var lockExpireTime)) + { + await redisClient.UnWatchAsync(ct).ConfigureAwait(false); // since the client is scoped externally + return false; + } + + //If the expire time is greater than the current time then we can't let the lock go yet + if (lockExpireTime > DateTime.UtcNow.ToUnixTimeMs()) + { + await redisClient.UnWatchAsync(ct).ConfigureAwait(false); // since the client is scoped externally + return false; + } + + //If the expire time is less than the current time then it wasn't released properly and we can attempt to + //acquire the lock. The above call to Watch(_lockKey) enrolled the key in monitoring, so if it changes + //before we call Commit() below, the Commit will fail and return false, which means that another thread + //was able to acquire the lock before we finished processing. 
+ await using var trans = await redisClient.CreateTransactionAsync(ct).ConfigureAwait(false); + trans.QueueCommand(r => r.SetValueAsync(key, lockString)); + return await trans.CommitAsync(ct).ConfigureAwait(false); //returns false if Transaction failed + }, + timeOut, token + ).ConfigureAwait(false); + } + + ValueTask IAsyncDisposable.DisposeAsync() + => new ValueTask(((IRedisClientAsync)untypedClient).RemoveAsync(key)); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisLock.cs b/src/ServiceStack.Redis/RedisLock.cs index 6fda7ba6..abf3a9d9 100644 --- a/src/ServiceStack.Redis/RedisLock.cs +++ b/src/ServiceStack.Redis/RedisLock.cs @@ -1,29 +1,32 @@ -using System; -using ServiceStack.Common; using ServiceStack.Text; +using System; namespace ServiceStack.Redis { - public class RedisLock + public partial class RedisLock : IDisposable { - private readonly RedisClient redisClient; + private readonly object untypedClient; private readonly string key; - public RedisLock(RedisClient redisClient, string key, TimeSpan? timeOut) + private RedisLock(object redisClient, string key) { - this.redisClient = redisClient; + this.untypedClient = redisClient; this.key = key; + } - ExecExtensions.RetryUntilTrue( + public RedisLock(IRedisClient redisClient, string key, TimeSpan? timeOut) + : this(redisClient, key) + { + ExecUtils.RetryUntilTrue( () => { //This pattern is taken from the redis command for SETNX http://redis.io/commands/setnx //Calculate a unix time for when the lock should expire - TimeSpan realSpan = timeOut ?? new TimeSpan(365, 0, 0, 0); //if nothing is passed in the timeout hold for a year - DateTime expireTime = DateTime.UtcNow.Add(realSpan); - string lockString = (expireTime.ToUnixTimeMs() + 1).ToString(); + var realSpan = timeOut ?? new TimeSpan(365, 0, 0, 0); //if nothing is passed in the timeout hold for a year + var expireTime = DateTime.UtcNow.Add(realSpan); + var lockString = (expireTime.ToUnixTimeMs() + 1).ToString(); //Try to set the lock, if it does not exist this will succeed and the lock is obtained var nx = redisClient.SetValueIfNotExists(key, lockString); @@ -35,9 +38,8 @@ public RedisLock(RedisClient redisClient, string key, TimeSpan? timeOut) //Therefore we need to get the value of the lock to see when it should expire redisClient.Watch(key); - string lockExpireString = redisClient.Get(key); - long lockExpireTime; - if (!long.TryParse(lockExpireString, out lockExpireTime)) + var lockExpireString = redisClient.Get(key); + if (!long.TryParse(lockExpireString, out var lockExpireTime)) { redisClient.UnWatch(); // since the client is scoped externally return false; @@ -66,7 +68,7 @@ public RedisLock(RedisClient redisClient, string key, TimeSpan? timeOut) public void Dispose() { - redisClient.Remove(key); + ((IRedisClient)untypedClient).Remove(key); } } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisManagerPool.Async.cs b/src/ServiceStack.Redis/RedisManagerPool.Async.cs new file mode 100644 index 00000000..25406123 --- /dev/null +++ b/src/ServiceStack.Redis/RedisManagerPool.Async.cs @@ -0,0 +1,33 @@ +//Copyright (c) ServiceStack, Inc. All Rights Reserved. 
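> Editor's note: `RedisLock` above now has both sync and async acquisition paths built on the SETNX + WATCH/transaction pattern described in its inline comments, retrying with full-jitter back-off until the lock key can be set. The sync constructor blocks until the lock is acquired and `Dispose` releases it by deleting the key; a sketch with a placeholder lock key and timeout.

```csharp
using System;
using ServiceStack.Redis;

class RedisLockSketch
{
    static void Main()
    {
        using var redis = new RedisClient("localhost", 6379);

        // The constructor retries (with jittered back-off) until the lock key
        // can be SETNX'd; Dispose deletes the key, releasing the lock
        using (new RedisLock(redis, "lock:orders", TimeSpan.FromSeconds(30)))
        {
            // critical section: only one holder of "lock:orders" runs here at a time
        }
    }
}
```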
+//License: https://raw.github.com/ServiceStack/ServiceStack/master/license.txt + +using ServiceStack.Caching; +using ServiceStack.Redis.Internal; +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + public partial class RedisManagerPool + : IRedisClientsManagerAsync + { + ValueTask IRedisClientsManagerAsync.GetCacheClientAsync(CancellationToken token) + => new RedisClientManagerCacheClient(this).AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetClientAsync(CancellationToken token) + => GetClient(true).AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyCacheClientAsync(CancellationToken token) + => new RedisClientManagerCacheClient(this) { ReadOnly = true }.AsValueTaskResult(); + + ValueTask IRedisClientsManagerAsync.GetReadOnlyClientAsync(CancellationToken token) + => GetClient(true).AsValueTaskResult(); + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisManagerPool.cs b/src/ServiceStack.Redis/RedisManagerPool.cs index 2fc2b157..6394f037 100644 --- a/src/ServiceStack.Redis/RedisManagerPool.cs +++ b/src/ServiceStack.Redis/RedisManagerPool.cs @@ -1,4 +1,4 @@ -//Copyright (c) Service Stack LLC. All Rights Reserved. +//Copyright (c) ServiceStack, Inc. All Rights Reserved. //License: https://raw.github.com/ServiceStack/ServiceStack/master/license.txt using System; @@ -11,23 +11,33 @@ namespace ServiceStack.Redis { + /// + /// Configuration class for the RedisManagerPool + /// public class RedisPoolConfig { + /// + /// Default pool size used by every new instance of . (default: 40) + /// public static int DefaultMaxPoolSize = 40; public RedisPoolConfig() { + // maybe a bit overkill? could be deprecated if you add max int on RedisManagerPool MaxPoolSize = RedisConfig.DefaultMaxPoolSize ?? DefaultMaxPoolSize; } + /// + /// Maximum amount of s created by the . + /// public int MaxPoolSize { get; set; } } /// - /// Provides thread-safe pooling of redis client connections. + /// Provides thread-safe pooling of redis client connections. All connections are treated as read and write hosts. 
/// public partial class RedisManagerPool - : IRedisClientsManager, IRedisFailover, IHandleClientDispose, IHasRedisResolver + : IRedisClientsManager, IRedisFailover, IHandleClientDispose, IHasRedisResolver, IRedisClientCacheManager { private static readonly ILog Log = LogManager.GetLogger(typeof(RedisManagerPool)); @@ -51,6 +61,8 @@ public partial class RedisManagerPool public int MaxPoolSize { get; private set; } + public bool AssertAccessOnlyOnSameThread { get; set; } + public RedisManagerPool() : this(RedisConfig.DefaultHost) { } public RedisManagerPool(string host) : this(new[] { host }) { } public RedisManagerPool(string host, RedisPoolConfig config) : this(new[] { host }, config) { } @@ -59,7 +71,7 @@ public RedisManagerPool(IEnumerable hosts) : this(hosts, null) { } public RedisManagerPool(IEnumerable hosts, RedisPoolConfig config) { if (hosts == null) - throw new ArgumentNullException("hosts"); + throw new ArgumentNullException(nameof(hosts)); RedisResolver = new RedisResolver(hosts, null); @@ -73,6 +85,8 @@ public RedisManagerPool(IEnumerable hosts, RedisPoolConfig config) clients = new RedisClient[MaxPoolSize]; poolIndex = 0; + this.AssertAccessOnlyOnSameThread = RedisConfig.AssertAccessOnlyOnSameThread; + JsConfig.InitStatics(); } @@ -80,6 +94,8 @@ public void FailoverTo(params string[] readWriteHosts) { Interlocked.Increment(ref RedisState.TotalFailovers); + Log.Info($"FailoverTo: {string.Join(",", readWriteHosts)} Total: {RedisState.TotalFailovers}"); + lock (clients) { for (var i = 0; i < clients.Length; i++) @@ -118,7 +134,8 @@ public void FailoverTo(IEnumerable readWriteHosts, IEnumerable r /// Returns a Read/Write client (The default) using the hosts defined in ReadWriteHosts /// /// - public IRedisClient GetClient() + public IRedisClient GetClient() => GetClient(false); + private RedisClient GetClient(bool forAsync) { try { @@ -127,17 +144,18 @@ public IRedisClient GetClient() { AssertValidPool(); - RedisClient inActiveClient; //-1 when no available clients otherwise index of reservedSlot or existing Client - inactivePoolIndex = GetInActiveClient(out inActiveClient); + inactivePoolIndex = GetInActiveClient(out var inActiveClient); //inActiveClient != null only for Valid InActive Clients if (inActiveClient != null) { poolIndex++; - inActiveClient.Active = true; + inActiveClient.Activate(); - return inActiveClient; + return !AssertAccessOnlyOnSameThread + ? inActiveClient + : inActiveClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); } } @@ -160,14 +178,14 @@ public IRedisClient GetClient() lock (clients) { //Create new client outside of pool when max pool size exceeded - //Reverting free-slot not needed when -1 since slwo wasn't reserved or - //when existingClient changed (failover) since no longer reserver + //Reverting free-slot not needed when -1 since slow wasn't reserved or + //when existingClient changed (failover) since no longer reserved var stillReserved = inactivePoolIndex >= 0 && inactivePoolIndex < clients.Length && clients[inactivePoolIndex] == existingClient; if (inactivePoolIndex == -1 || !stillReserved) { if (Log.IsDebugEnabled) - Log.Debug("clients[inactivePoolIndex] != existingClient: {0}".Fmt(!stillReserved ? "!stillReserved" : "-1")); + Log.Debug($"POOL clients[inactivePoolIndex] != existingClient: {(!stillReserved ? 
"!stillReserved" : "-1")}"); Interlocked.Increment(ref RedisState.TotalClientsCreatedOutsidePool); @@ -178,7 +196,10 @@ public IRedisClient GetClient() poolIndex++; clients[inactivePoolIndex] = newClient; - return newClient; + + return (!AssertAccessOnlyOnSameThread || forAsync) + ? newClient + : newClient.LimitAccessToThread(Thread.CurrentThread.ManagedThreadId, Environment.StackTrace); } } catch @@ -202,7 +223,7 @@ public IRedisClient GetClient() public IRedisClient GetReadOnlyClient() { - return GetClient(); + return GetClient(false); } class ReservedClient : RedisClient @@ -225,7 +246,7 @@ public override void Dispose() { } private int GetInActiveClient(out RedisClient inactiveClient) { //this will loop through all hosts in readClients once even though there are 2 for loops - //both loops are used to try to get the prefered host according to the round robin algorithm + //both loops are used to try to get the preferred host according to the round robin algorithm var readWriteTotal = RedisResolver.ReadWriteHostsCount; var desiredIndex = poolIndex % clients.Length; for (int x = 0; x < readWriteTotal; x++) @@ -260,7 +281,7 @@ private int GetInActiveClient(out RedisClient inactiveClient) private RedisClient InitNewClient(RedisClient client) { client.Id = Interlocked.Increment(ref RedisClientCounter); - client.Active = true; + client.Activate(newClient:true); client.ClientManager = this; client.ConnectionFilter = ConnectionFilter; @@ -281,7 +302,8 @@ public void DisposeClient(RedisNativeClient client) } else { - client.Active = false; + client.TrackThread = null; + client.Deactivate(); } Monitor.PulseAll(clients); @@ -298,7 +320,7 @@ public void DisposeWriteClient(RedisNativeClient client) { lock (clients) { - client.Active = false; + client.Deactivate(); } } @@ -388,9 +410,9 @@ protected virtual void Dispose(bool disposing) try { // get rid of unmanaged resources - for (var i = 0; i < clients.Length; i++) + foreach (var client in clients) { - Dispose(clients[i]); + Dispose(client); } } catch (Exception ex) @@ -412,9 +434,7 @@ protected void Dispose(RedisClient redisClient) } catch (Exception ex) { - Log.Error(string.Format( - "Error when trying to dispose of RedisClient to host {0}:{1}", - redisClient.Host, redisClient.Port), ex); + Log.Error($"Error when trying to dispose of RedisClient to host {redisClient.Host}:{redisClient.Port}", ex); } } diff --git a/src/ServiceStack.Redis/RedisNativeClient.Async.cs b/src/ServiceStack.Redis/RedisNativeClient.Async.cs new file mode 100644 index 00000000..d265c0ae --- /dev/null +++ b/src/ServiceStack.Redis/RedisNativeClient.Async.cs @@ -0,0 +1,1487 @@ +using ServiceStack.Redis.Internal; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Globalization; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisNativeClient + : IRedisNativeClientAsync + { + internal IRedisPipelineSharedAsync PipelineAsync + => (IRedisPipelineSharedAsync)pipeline; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void AssertNotNull(object obj, string name = "key") + { + if (obj is null) Throw(name); + static void Throw(string name) => throw new ArgumentNullException(name); + } + + private IRedisNativeClientAsync AsAsync() => this; + + ValueTask IAsyncDisposable.DisposeAsync() + { + Dispose(); + return default; + } + + ValueTask 
IRedisNativeClientAsync.TimeAsync(CancellationToken token) + => SendExpectMultiDataAsync(token, Commands.Time); + + ValueTask IRedisNativeClientAsync.ExistsAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Exists, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SetAsync(string key, byte[] value, bool exists, long expirySeconds, long expiryMilliseconds, CancellationToken token) + { + AssertNotNull(key); + value ??= TypeConstants.EmptyByteArray; + + if (value.Length > OneGb) + throw new ArgumentException("value exceeds 1G", nameof(value)); + + var entryExists = exists ? Commands.Xx : Commands.Nx; + byte[][] args; + if (expiryMilliseconds != 0) + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, Commands.Px, expiryMilliseconds.ToUtf8Bytes(), entryExists }; + } + else if (expirySeconds != 0) + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, Commands.Ex, expirySeconds.ToUtf8Bytes(), entryExists }; + } + else + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, entryExists }; + } + + return IsString(SendExpectStringAsync(token, args), OK); + } + ValueTask IRedisNativeClientAsync.SetAsync(string key, byte[] value, long expirySeconds, long expiryMilliseconds, CancellationToken token) + { + AssertNotNull(key); + value ??= TypeConstants.EmptyByteArray; + + if (value.Length > OneGb) + throw new ArgumentException("value exceeds 1G", nameof(value)); + + byte[][] args; + if (expiryMilliseconds != 0) + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, Commands.Px, expiryMilliseconds.ToUtf8Bytes() }; + } + else if (expirySeconds != 0) + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value, Commands.Ex, expirySeconds.ToUtf8Bytes() }; + } + else + { + args = new[] { Commands.Set, key.ToUtf8Bytes(), value }; + } + + return SendExpectSuccessAsync(token, args); + } + + ValueTask IRedisNativeClientAsync.GetAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDataAsync(token, Commands.Get, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.DelAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Del, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ScanAsync(ulong cursor, int count, string match, CancellationToken token) + { + if (match == null) + return SendExpectScanResultAsync(token, Commands.Scan, cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + + return SendExpectScanResultAsync(token, Commands.Scan, cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.TypeAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectCodeAsync(token, Commands.Type, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RPushAsync(string listId, byte[] value, CancellationToken token) + { + AssertListIdAndValue(listId, value); + + return SendExpectLongAsync(token, Commands.RPush, listId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SAddAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + + return SendExpectLongAsync(token, Commands.SAdd, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZAddAsync(string setId, double score, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + + return SendExpectLongAsync(token, Commands.ZAdd, 
setId.ToUtf8Bytes(), score.ToFastUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZAddAsync(string setId, long score, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + + return SendExpectLongAsync(token, Commands.ZAdd, setId.ToUtf8Bytes(), score.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.HSetAsync(string hashId, byte[] key, byte[] value, CancellationToken token) + => HSetAsync(hashId.ToUtf8Bytes(), key, value, token); + + internal ValueTask HSetAsync(byte[] hashId, byte[] key, byte[] value, CancellationToken token = default) + { + AssertHashIdAndKey(hashId, key); + + return SendExpectLongAsync(token, Commands.HSet, hashId, key, value); + } + + ValueTask IRedisNativeClientAsync.RandomKeyAsync(CancellationToken token) + => SendExpectDataAsync(token, Commands.RandomKey).FromUtf8BytesAsync(); + + ValueTask IRedisNativeClientAsync.RenameAsync(string oldKeyName, string newKeyName, CancellationToken token) + { + CheckRenameKeys(oldKeyName, newKeyName); + return SendExpectSuccessAsync(token, Commands.Rename, oldKeyName.ToUtf8Bytes(), newKeyName.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RenameNxAsync(string oldKeyName, string newKeyName, CancellationToken token) + { + CheckRenameKeys(oldKeyName, newKeyName); + return SendExpectLongAsync(token, Commands.RenameNx, oldKeyName.ToUtf8Bytes(), newKeyName.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.MSetAsync(byte[][] keys, byte[][] values, CancellationToken token) + { + var keysAndValues = MergeCommandWithKeysAndValues(Commands.MSet, keys, values); + + return SendExpectSuccessAsync(token, keysAndValues); + } + + + ValueTask IRedisNativeClientAsync.MSetAsync(string[] keys, byte[][] values, CancellationToken token) + => ((IRedisNativeClientAsync)this).MSetAsync(keys.ToMultiByteArray(), values, token); + + ValueTask IRedisNativeClientAsync.SelectAsync(long db, CancellationToken token) + { + this.db = db; + return SendExpectSuccessAsync(token, Commands.Select, db.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.DelAsync(string[] keys, CancellationToken token) + { + AssertNotNull(keys, nameof(keys)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.Del, keys); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.ExpireAsync(string key, int seconds, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Expire, key.ToUtf8Bytes(), seconds.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.PExpireAsync(string key, long ttlMs, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.PExpire, key.ToUtf8Bytes(), ttlMs.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.ExpireAtAsync(string key, long unixTime, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.ExpireAt, key.ToUtf8Bytes(), unixTime.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.PExpireAtAsync(string key, long unixTimeMs, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.PExpireAt, key.ToUtf8Bytes(), unixTimeMs.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.TtlAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Ttl, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.PTtlAsync(string key, 
CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.PTtl, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.PingAsync(CancellationToken token) + => IsString(SendExpectCodeAsync(token, Commands.Ping), "PONG"); + + private static ValueTask IsString(ValueTask pending, string expected) + { + return pending.IsCompletedSuccessfully ? (pending.Result == expected).AsValueTaskResult() + : Awaited(pending, expected); + + static async ValueTask Awaited(ValueTask pending, string expected) + => await pending.ConfigureAwait(false) == expected; + } + + ValueTask IRedisNativeClientAsync.EchoAsync(string text, CancellationToken token) + => SendExpectDataAsync(token, Commands.Echo, text.ToUtf8Bytes()).FromUtf8BytesAsync(); + + ValueTask IRedisNativeClientAsync.DbSizeAsync(CancellationToken token) + => SendExpectLongAsync(token, Commands.DbSize); + + ValueTask IRedisNativeClientAsync.LastSaveAsync(CancellationToken token) + => SendExpectLongAsync(token, Commands.LastSave).Await(t => t.FromUnixTime()); + + ValueTask IRedisNativeClientAsync.SaveAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Save); + + ValueTask IRedisNativeClientAsync.BgSaveAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.BgSave); + + ValueTask IRedisNativeClientAsync.ShutdownAsync(bool noSave, CancellationToken token) + => noSave + ? SendWithoutReadAsync(token, Commands.Shutdown, Commands.NoSave) + : SendWithoutReadAsync(token, Commands.Shutdown); + + ValueTask IRedisNativeClientAsync.BgRewriteAofAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.BgRewriteAof); + + ValueTask IRedisNativeClientAsync.QuitAsync(CancellationToken token) + => SendWithoutReadAsync(token, Commands.Quit); + + ValueTask IRedisNativeClientAsync.FlushDbAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.FlushDb); + + ValueTask IRedisNativeClientAsync.FlushAllAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.FlushAll); + + ValueTask IRedisNativeClientAsync.SlaveOfAsync(string hostname, int port, CancellationToken token) + => SendExpectSuccessAsync(token, Commands.SlaveOf, hostname.ToUtf8Bytes(), port.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.SlaveOfNoOneAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.SlaveOf, Commands.No, Commands.One); + + ValueTask IRedisNativeClientAsync.KeysAsync(string pattern, CancellationToken token) + { + AssertNotNull(pattern, nameof(pattern)); + return SendExpectMultiDataAsync(token, Commands.Keys, pattern.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.MGetAsync(string[] keys, CancellationToken token) + { + AssertNotNull(keys, nameof(keys)); + if (keys.Length == 0) + throw new ArgumentException("keys"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.MGet, keys); + + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SetExAsync(string key, int expireInSeconds, byte[] value, CancellationToken token) + { + AssertNotNull(key); + value ??= TypeConstants.EmptyByteArray; + + if (value.Length > OneGb) + throw new ArgumentException("value exceeds 1G", nameof(value)); + + return SendExpectSuccessAsync(token, Commands.SetEx, key.ToUtf8Bytes(), expireInSeconds.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.WatchAsync(string[] keys, CancellationToken token) + { + AssertNotNull(keys, nameof(keys)); + if (keys.Length == 0) + throw new ArgumentException("keys"); + + 
var cmdWithArgs = MergeCommandWithArgs(Commands.Watch, keys); + + return SendExpectCodeAsync(token, cmdWithArgs).Await(); + } + + ValueTask IRedisNativeClientAsync.UnWatchAsync(CancellationToken token) + => SendExpectCodeAsync(token, Commands.UnWatch).Await(); + + ValueTask IRedisNativeClientAsync.AppendAsync(string key, byte[] value, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Append, key.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.GetRangeAsync(string key, int fromIndex, int toIndex, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDataAsync(token, Commands.GetRange, key.ToUtf8Bytes(), fromIndex.ToUtf8Bytes(), toIndex.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SetRangeAsync(string key, int offset, byte[] value, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.SetRange, key.ToUtf8Bytes(), offset.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.GetBitAsync(string key, int offset, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.GetBit, key.ToUtf8Bytes(), offset.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SetBitAsync(string key, int offset, int value, CancellationToken token) + { + AssertNotNull(key); + if (value > 1 || value < 0) + throw new ArgumentOutOfRangeException(nameof(value), "value is out of range"); + return SendExpectLongAsync(token, Commands.SetBit, key.ToUtf8Bytes(), offset.ToUtf8Bytes(), value.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.PersistAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Persist, key.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.PSetExAsync(string key, long expireInMs, byte[] value, CancellationToken token) + { + AssertNotNull(key); + return SendExpectSuccessAsync(token, Commands.PSetEx, key.ToUtf8Bytes(), expireInMs.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SetNXAsync(string key, byte[] value, CancellationToken token) + { + AssertNotNull(key); + value ??= TypeConstants.EmptyByteArray; + + if (value.Length > OneGb) + throw new ArgumentException("value exceeds 1G", "value"); + + return SendExpectLongAsync(token, Commands.SetNx, key.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SPopAsync(string setId, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectDataAsync(token, Commands.SPop, setId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SPopAsync(string setId, int count, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectMultiDataAsync(token, Commands.SPop, setId.ToUtf8Bytes(), count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SlowlogResetAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Slowlog, "RESET".ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.SlowlogGetAsync(int? 
top, CancellationToken token) + { + if (top.HasValue) + return SendExpectDeeplyNestedMultiDataAsync(token, Commands.Slowlog, Commands.Get, top.Value.ToUtf8Bytes()); + else + return SendExpectDeeplyNestedMultiDataAsync(token, Commands.Slowlog, Commands.Get); + } + + ValueTask IRedisNativeClientAsync.ZCardAsync(string setId, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZCard, setId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZCountAsync(string setId, double min, double max, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZCount, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZScoreAsync(string setId, byte[] value, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectDoubleAsync(token, Commands.ZScore, setId.ToUtf8Bytes(), value); + } + + protected ValueTask RawCommandAsync(CancellationToken token, params object[] cmdWithArgs) + { + var byteArgs = new List(); + + foreach (var arg in cmdWithArgs) + { + if (arg == null) + { + byteArgs.Add(TypeConstants.EmptyByteArray); + continue; + } + + if (arg is byte[] bytes) + { + byteArgs.Add(bytes); + } + else if (arg.GetType().IsUserType()) + { + var json = arg.ToJson(); + byteArgs.Add(json.ToUtf8Bytes()); + } + else + { + var str = arg.ToString(); + byteArgs.Add(str.ToUtf8Bytes()); + } + } + + return SendExpectComplexResponseAsync(token, byteArgs.ToArray()); + } + + ValueTask> IRedisNativeClientAsync.InfoAsync(CancellationToken token) + => SendExpectStringAsync(token, Commands.Info).Await(ParseInfoResult); + + ValueTask IRedisNativeClientAsync.ZRangeByLexAsync(string setId, string min, string max, int? skip, int? 
take, CancellationToken token) + => SendExpectMultiDataAsync(token, GetZRangeByLexArgs(setId, min, max, skip, take)); + + ValueTask IRedisNativeClientAsync.ZLexCountAsync(string setId, string min, string max, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + + return SendExpectLongAsync(token, + Commands.ZLexCount, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZRemRangeByLexAsync(string setId, string min, string max, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + + return SendExpectLongAsync(token, + Commands.ZRemRangeByLex, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.CalculateSha1Async(string luaBody, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + byte[] buffer = Encoding.UTF8.GetBytes(luaBody); + return BitConverter.ToString(buffer.ToSha1Hash()).Replace("-", "").AsValueTaskResult(); + } + + ValueTask IRedisNativeClientAsync.ScriptExistsAsync(byte[][] sha1Refs, CancellationToken token) + { + var keysAndValues = MergeCommandWithArgs(Commands.Script, Commands.Exists, sha1Refs); + return SendExpectMultiDataAsync(token, keysAndValues); + } + + ValueTask IRedisNativeClientAsync.ScriptFlushAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Script, Commands.Flush); + + ValueTask IRedisNativeClientAsync.ScriptKillAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Script, Commands.Kill); + + ValueTask IRedisNativeClientAsync.ScriptLoadAsync(string body, CancellationToken token) + { + AssertNotNull(body, nameof(body)); + + var cmdArgs = MergeCommandWithArgs(Commands.Script, Commands.Load, body.ToUtf8Bytes()); + return SendExpectDataAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.StrLenAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.StrLen, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.LLenAsync(string listId, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectLongAsync(token, Commands.LLen, listId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.SCardAsync(string setId, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.SCard, setId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HLenAsync(string hashId, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + return SendExpectLongAsync(token, Commands.HLen, hashId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.EvalCommandAsync(string luaBody, int numberKeysInArgs, byte[][] keys, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); + return RawCommandAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalShaCommandAsync(string sha1, int numberKeysInArgs, byte[][] keys, CancellationToken token) + { + AssertNotNull(sha1, nameof(sha1)); + + var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); + return RawCommandAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalAsync(string luaBody, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + var cmdArgs = MergeCommandWithArgs(Commands.Eval, 
luaBody.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectMultiDataAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalShaAsync(string sha1, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(sha1, nameof(sha1)); + + var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectMultiDataAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalIntAsync(string luaBody, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectLongAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalShaIntAsync(string sha1, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(sha1, nameof(sha1)); + + var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectLongAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.EvalStrAsync(string luaBody, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(luaBody, nameof(luaBody)); + + var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectDataAsync(token, cmdArgs).FromUtf8BytesAsync(); + } + + ValueTask IRedisNativeClientAsync.EvalShaStrAsync(string sha1, int numberOfKeys, byte[][] keysAndArgs, CancellationToken token) + { + AssertNotNull(sha1, nameof(sha1)); + + var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keysAndArgs.PrependInt(numberOfKeys)); + return SendExpectDataAsync(token, cmdArgs).FromUtf8BytesAsync(); + } + + ValueTask IRedisNativeClientAsync.SMembersAsync(string setId, CancellationToken token) + => SendExpectMultiDataAsync(token, Commands.SMembers, setId.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.SAddAsync(string setId, byte[][] values, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + AssertNotNull(values, nameof(values)); + if (values.Length == 0) + throw new ArgumentException(nameof(values)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SAdd, setId.ToUtf8Bytes(), values); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SRemAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectLongAsync(token, Commands.SRem, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.IncrByAsync(string key, long count, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.IncrBy, key.ToUtf8Bytes(), count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.IncrByFloatAsync(string key, double incrBy, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDoubleAsync(token, Commands.IncrByFloat, key.ToUtf8Bytes(), incrBy.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.IncrAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Incr, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.DecrAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Decr, key.ToUtf8Bytes()); + } + + ValueTask 
IRedisNativeClientAsync.DecrByAsync(string key, long count, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.DecrBy, key.ToUtf8Bytes(), count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ConfigGetAsync(string pattern, CancellationToken token) + => SendExpectMultiDataAsync(token, Commands.Config, Commands.Get, pattern.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.ConfigSetAsync(string item, byte[] value, CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Config, Commands.Set, item.ToUtf8Bytes(), value); + + ValueTask IRedisNativeClientAsync.ConfigResetStatAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Config, Commands.ResetStat); + + ValueTask IRedisNativeClientAsync.ConfigRewriteAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Config, Commands.Rewrite); + + ValueTask IRedisNativeClientAsync.DebugSegfaultAsync(CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Debug, Commands.Segfault); + + ValueTask IRedisNativeClientAsync.DumpAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDataAsync(token, Commands.Dump, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RestoreAsync(string key, long expireMs, byte[] dumpValue, CancellationToken token) + { + AssertNotNull(key); + return SendExpectDataAsync(token, Commands.Restore, key.ToUtf8Bytes(), expireMs.ToUtf8Bytes(), dumpValue); + } + + ValueTask IRedisNativeClientAsync.MigrateAsync(string host, int port, string key, int destinationDb, long timeoutMs, CancellationToken token) + { + AssertNotNull(key); + return SendExpectSuccessAsync(token, Commands.Migrate, host.ToUtf8Bytes(), port.ToUtf8Bytes(), key.ToUtf8Bytes(), destinationDb.ToUtf8Bytes(), timeoutMs.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.MoveAsync(string key, int db, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Move, key.ToUtf8Bytes(), db.ToUtf8Bytes()).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.ObjectIdleTimeAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.Object, Commands.IdleTime, key.ToUtf8Bytes()); + } + + async ValueTask IRedisNativeClientAsync.RoleAsync(CancellationToken token) + => (await SendExpectComplexResponseAsync(token, Commands.Role).ConfigureAwait(false)).ToRedisText(); + + ValueTask IRedisNativeClientAsync.RawCommandAsync(object[] cmdWithArgs, CancellationToken token) + => SendExpectComplexResponseAsync(token, PrepareRawCommand(cmdWithArgs)); + + ValueTask IRedisNativeClientAsync.RawCommandAsync(byte[][] cmdWithBinaryArgs, CancellationToken token) + => SendExpectComplexResponseAsync(token, cmdWithBinaryArgs); + + ValueTask IRedisNativeClientAsync.ClientGetNameAsync(CancellationToken token) + => SendExpectStringAsync(token, Commands.Client, Commands.GetName); + + ValueTask IRedisNativeClientAsync.ClientSetNameAsync(string name, CancellationToken token) + { + ClientValidateName(name); + return SendExpectSuccessAsync(token, Commands.Client, Commands.SetName, name.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ClientKillAsync(string clientAddr, CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Client, Commands.Kill, clientAddr.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.ClientKillAsync(string addr, string id, string type, string skipMe, CancellationToken token) + => 
SendExpectLongAsync(token, ClientKillPrepareArgs(addr, id, type, skipMe)); + + ValueTask IRedisNativeClientAsync.ClientListAsync(CancellationToken token) + => SendExpectDataAsync(token, Commands.Client, Commands.List); + + ValueTask IRedisNativeClientAsync.ClientPauseAsync(int timeOutMs, CancellationToken token) + => SendExpectSuccessAsync(token, Commands.Client, Commands.Pause, timeOutMs.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.MSetNxAsync(byte[][] keys, byte[][] values, CancellationToken token) + { + var keysAndValues = MergeCommandWithKeysAndValues(Commands.MSet, keys, values); + return SendExpectLongAsync(token, keysAndValues).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.MSetNxAsync(string[] keys, byte[][] values, CancellationToken token) + => AsAsync().MSetNxAsync(keys.ToMultiByteArray(), values, token); + + ValueTask IRedisNativeClientAsync.GetSetAsync(string key, byte[] value, CancellationToken token) + { + GetSetAssertArgs(key, ref value); + return SendExpectDataAsync(token, Commands.GetSet, key.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.MGetAsync(byte[][] keys, CancellationToken token) + => SendExpectMultiDataAsync(token, MGetPrepareArgs(keys)); + + ValueTask IRedisNativeClientAsync.SScanAsync(string setId, ulong cursor, int count, string match, CancellationToken token) + { + if (match == null) + { + return SendExpectScanResultAsync(token, Commands.SScan, + setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + return SendExpectScanResultAsync(token, Commands.SScan, + setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZScanAsync(string setId, ulong cursor, int count, string match, CancellationToken token) + { + if (match == null) + { + return SendExpectScanResultAsync(token, Commands.ZScan, + setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + return SendExpectScanResultAsync(token, Commands.ZScan, + setId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HScanAsync(string hashId, ulong cursor, int count, string match, CancellationToken token) + { + if (match == null) + { + return SendExpectScanResultAsync(token, Commands.HScan, + hashId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + return SendExpectScanResultAsync(token, Commands.HScan, + hashId.ToUtf8Bytes(), cursor.ToUtf8Bytes(), + Commands.Match, match.ToUtf8Bytes(), + Commands.Count, count.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.PfAddAsync(string key, byte[][] elements, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.PfAdd, key.ToUtf8Bytes(), elements); + return SendExpectLongAsync(token, cmdWithArgs).IsSuccessAsync(); + } + + ValueTask IRedisNativeClientAsync.PfCountAsync(string key, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.PfCount, key.ToUtf8Bytes()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.PfMergeAsync(string toKeyId, string[] fromKeys, CancellationToken token) + { + var fromKeyBytes = fromKeys.Map(x => x.ToUtf8Bytes()).ToArray(); + var cmdWithArgs = MergeCommandWithArgs(Commands.PfMerge, toKeyId.ToUtf8Bytes(), fromKeyBytes); + return SendExpectSuccessAsync(token, cmdWithArgs); + } + + ValueTask 
IRedisNativeClientAsync.SortAsync(string listOrSetId, SortOptions sortOptions, CancellationToken token) + => SendExpectMultiDataAsync(token, SortPrepareArgs(listOrSetId, sortOptions)); + + ValueTask IRedisNativeClientAsync.LRangeAsync(string listId, int startingFrom, int endingAt, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectMultiDataAsync(token, Commands.LRange, listId.ToUtf8Bytes(), startingFrom.ToUtf8Bytes(), endingAt.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RPushXAsync(string listId, byte[] value, CancellationToken token) + { + AssertListIdAndValue(listId, value); + return SendExpectLongAsync(token, Commands.RPush, listId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LPushAsync(string listId, byte[] value, CancellationToken token) + { + AssertListIdAndValue(listId, value); + return SendExpectLongAsync(token, Commands.LPush, listId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LPushXAsync(string listId, byte[] value, CancellationToken token) + { + AssertListIdAndValue(listId, value); + return SendExpectLongAsync(token, Commands.LPushX, listId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LTrimAsync(string listId, int keepStartingFrom, int keepEndingAt, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectSuccessAsync(token, Commands.LTrim, listId.ToUtf8Bytes(), keepStartingFrom.ToUtf8Bytes(), keepEndingAt.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.LRemAsync(string listId, int removeNoOfMatches, byte[] value, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectLongAsync(token, Commands.LRem, listId.ToUtf8Bytes(), removeNoOfMatches.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LIndexAsync(string listId, int listIndex, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectDataAsync(token, Commands.LIndex, listId.ToUtf8Bytes(), listIndex.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.LInsertAsync(string listId, bool insertBefore, byte[] pivot, byte[] value, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + var position = insertBefore ? 
Commands.Before : Commands.After; + return SendExpectSuccessAsync(token, Commands.LInsert, listId.ToUtf8Bytes(), position, pivot, value); + } + + ValueTask IRedisNativeClientAsync.LSetAsync(string listId, int listIndex, byte[] value, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectSuccessAsync(token, Commands.LSet, listId.ToUtf8Bytes(), listIndex.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.LPopAsync(string listId, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectDataAsync(token, Commands.LPop, listId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.RPopAsync(string listId, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectDataAsync(token, Commands.RPop, listId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.BLPopAsync(string listId, int timeOutSecs, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectMultiDataAsync(token, Commands.BLPop, listId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.BLPopAsync(string[] listIds, int timeOutSecs, CancellationToken token) + { + AssertNotNull(listIds, nameof(listIds)); + var args = new List { Commands.BLPop }; + args.AddRange(listIds.Select(listId => listId.ToUtf8Bytes())); + args.Add(timeOutSecs.ToUtf8Bytes()); + return SendExpectMultiDataAsync(token, args.ToArray()); + } + + async ValueTask IRedisNativeClientAsync.BLPopValueAsync(string listId, int timeOutSecs, CancellationToken token) + { + var blockingResponse = await AsAsync().BLPopAsync(new[] { listId }, timeOutSecs, token).ConfigureAwait(false); + return blockingResponse.Length == 0 + ? null + : blockingResponse[1]; + } + + async ValueTask IRedisNativeClientAsync.BLPopValueAsync(string[] listIds, int timeOutSecs, CancellationToken token) + { + var blockingResponse = await AsAsync().BLPopAsync(listIds, timeOutSecs, token).ConfigureAwait(false); + return blockingResponse.Length == 0 + ? null + : blockingResponse; + } + + ValueTask IRedisNativeClientAsync.BRPopAsync(string listId, int timeOutSecs, CancellationToken token) + { + AssertNotNull(listId, nameof(listId)); + return SendExpectMultiDataAsync(token, Commands.BRPop, listId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.BRPopAsync(string[] listIds, int timeOutSecs, CancellationToken token) + { + AssertNotNull(listIds, nameof(listIds)); + var args = new List { Commands.BRPop }; + args.AddRange(listIds.Select(listId => listId.ToUtf8Bytes())); + args.Add(timeOutSecs.ToUtf8Bytes()); + return SendExpectMultiDataAsync(token, args.ToArray()); + } + + ValueTask IRedisNativeClientAsync.RPopLPushAsync(string fromListId, string toListId, CancellationToken token) + { + AssertNotNull(fromListId, nameof(fromListId)); + AssertNotNull(toListId, nameof(toListId)); + return SendExpectDataAsync(token, Commands.RPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes()); + } + + async ValueTask IRedisNativeClientAsync.BRPopValueAsync(string listId, int timeOutSecs, CancellationToken token) + { + var blockingResponse = await AsAsync().BRPopAsync(new[] { listId }, timeOutSecs, token).ConfigureAwait(false); + return blockingResponse.Length == 0 + ? 
null + : blockingResponse[1]; + } + + async ValueTask IRedisNativeClientAsync.BRPopValueAsync(string[] listIds, int timeOutSecs, CancellationToken token) + { + var blockingResponse = await AsAsync().BRPopAsync(listIds, timeOutSecs, token).ConfigureAwait(false); + return blockingResponse.Length == 0 + ? null + : blockingResponse; + } + + async ValueTask IRedisNativeClientAsync.BRPopLPushAsync(string fromListId, string toListId, int timeOutSecs, CancellationToken token) + { + AssertNotNull(fromListId, nameof(fromListId)); + AssertNotNull(toListId, nameof(toListId)); + byte[][] result = await SendExpectMultiDataAsync(token, Commands.BRPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); + return result.Length == 0 ? null : result[1]; + } + + ValueTask IRedisNativeClientAsync.SMoveAsync(string fromSetId, string toSetId, byte[] value, CancellationToken token) + { + AssertNotNull(fromSetId, nameof(fromSetId)); + AssertNotNull(toSetId, nameof(toSetId)); + return SendExpectSuccessAsync(token, Commands.SMove, fromSetId.ToUtf8Bytes(), toSetId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SIsMemberAsync(string setId, byte[] value, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.SIsMember, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.SInterAsync(string[] setIds, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.SInter, setIds); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SInterStoreAsync(string intoSetId, string[] setIds, CancellationToken token) + { + var setIdsList = new List(setIds); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SInterStore, setIdsList.ToArray()); + return SendExpectSuccessAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SUnionAsync(string[] setIds, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.SUnion, setIds); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SUnionStoreAsync(string intoSetId, string[] setIds, CancellationToken token) + { + var setIdsList = new List(setIds); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SUnionStore, setIdsList.ToArray()); + return SendExpectSuccessAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SDiffAsync(string fromSetId, string[] withSetIds, CancellationToken token) + { + var setIdsList = new List(withSetIds); + setIdsList.Insert(0, fromSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SDiff, setIdsList.ToArray()); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SDiffStoreAsync(string intoSetId, string fromSetId, string[] withSetIds, CancellationToken token) + { + var setIdsList = new List(withSetIds); + setIdsList.Insert(0, fromSetId); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.SDiffStore, setIdsList.ToArray()); + return SendExpectSuccessAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.SRandMemberAsync(string setId, CancellationToken token) + => SendExpectDataAsync(token, Commands.SRandMember, setId.ToUtf8Bytes()); + + ValueTask IRedisNativeClientAsync.ZRemAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectLongAsync(token, 
Commands.ZRem, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZRemAsync(string setId, byte[][] values, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + AssertNotNull(values, nameof(values)); + if (values.Length == 0) + throw new ArgumentException("values"); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZRem, setId.ToUtf8Bytes(), values); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.ZIncrByAsync(string setId, double incrBy, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectDoubleAsync(token, Commands.ZIncrBy, setId.ToUtf8Bytes(), incrBy.ToFastUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZIncrByAsync(string setId, long incrBy, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectDoubleAsync(token, Commands.ZIncrBy, setId.ToUtf8Bytes(), incrBy.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZRankAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectLongAsync(token, Commands.ZRank, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZRevRankAsync(string setId, byte[] value, CancellationToken token) + { + AssertSetIdAndValue(setId, value); + return SendExpectLongAsync(token, Commands.ZRevRank, setId.ToUtf8Bytes(), value); + } + + ValueTask IRedisNativeClientAsync.ZRangeAsync(string setId, int min, int max, CancellationToken token) + => SendExpectMultiDataAsync(token, Commands.ZRange, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes()); + + private ValueTask GetRangeAsync(byte[] commandBytes, string setId, int min, int max, bool withScores, CancellationToken token) + { + var args = GetRangeArgs(commandBytes, setId, min, max, withScores); + return SendExpectMultiDataAsync(token, args); + } + + ValueTask IRedisNativeClientAsync.ZRangeWithScoresAsync(string setId, int min, int max, CancellationToken token) + => GetRangeAsync(Commands.ZRange, setId, min, max, true, token); + + ValueTask IRedisNativeClientAsync.ZRevRangeAsync(string setId, int min, int max, CancellationToken token) + => GetRangeAsync(Commands.ZRevRange, setId, min, max, false, token); + + ValueTask IRedisNativeClientAsync.ZRevRangeWithScoresAsync(string setId, int min, int max, CancellationToken token) + => GetRangeAsync(Commands.ZRevRange, setId, min, max, true, token); + + private ValueTask GetRangeByScoreAsync(byte[] commandBytes, + string setId, double min, double max, int? skip, int? take, bool withScores, CancellationToken token) + { + var args = GetRangeByScoreArgs(commandBytes, setId, min, max, skip, take, withScores); + return SendExpectMultiDataAsync(token, args); + } + + ValueTask IRedisNativeClientAsync.ZRangeByScoreAsync(string setId, double min, double max, int? skip, int? take, CancellationToken token) + => GetRangeByScoreAsync(Commands.ZRangeByScore, setId, min, max, skip, take, false, token); + + ValueTask IRedisNativeClientAsync.ZRangeByScoreAsync(string setId, long min, long max, int? skip, int? take, CancellationToken token) + => GetRangeByScoreAsync(Commands.ZRangeByScore, setId, min, max, skip, take, false, token); + + ValueTask IRedisNativeClientAsync.ZRangeByScoreWithScoresAsync(string setId, double min, double max, int? skip, int? 
take, CancellationToken token) + => GetRangeByScoreAsync(Commands.ZRangeByScore, setId, min, max, skip, take, true, token); + + ValueTask IRedisNativeClientAsync.ZRangeByScoreWithScoresAsync(string setId, long min, long max, int? skip, int? take, CancellationToken token) + => GetRangeByScoreAsync(Commands.ZRangeByScore, setId, min, max, skip, take, true, token); + + ValueTask IRedisNativeClientAsync.ZRevRangeByScoreAsync(string setId, double min, double max, int? skip, int? take, CancellationToken token) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other + return GetRangeByScoreAsync(Commands.ZRevRangeByScore, setId, max, min, skip, take, false, token); + } + + ValueTask IRedisNativeClientAsync.ZRevRangeByScoreAsync(string setId, long min, long max, int? skip, int? take, CancellationToken token) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other + return GetRangeByScoreAsync(Commands.ZRevRangeByScore, setId, max, min, skip, take, false, token); + } + + ValueTask IRedisNativeClientAsync.ZRevRangeByScoreWithScoresAsync(string setId, double min, double max, int? skip, int? take, CancellationToken token) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other + return GetRangeByScoreAsync(Commands.ZRevRangeByScore, setId, max, min, skip, take, true, token); + } + + ValueTask IRedisNativeClientAsync.ZRevRangeByScoreWithScoresAsync(string setId, long min, long max, int? skip, int? take, CancellationToken token) + { + //Note: http://redis.io/commands/zrevrangebyscore has max, min in the wrong other + return GetRangeByScoreAsync(Commands.ZRevRangeByScore, setId, max, min, skip, take, true, token); + } + + ValueTask IRedisNativeClientAsync.ZRemRangeByRankAsync(string setId, int min, int max, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZRemRangeByRank, setId.ToUtf8Bytes(), + min.ToUtf8Bytes(), max.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZRemRangeByScoreAsync(string setId, double fromScore, double toScore, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZRemRangeByScore, setId.ToUtf8Bytes(), + fromScore.ToFastUtf8Bytes(), toScore.ToFastUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZRemRangeByScoreAsync(string setId, long fromScore, long toScore, CancellationToken token) + { + AssertNotNull(setId, nameof(setId)); + return SendExpectLongAsync(token, Commands.ZRemRangeByScore, setId.ToUtf8Bytes(), + fromScore.ToUtf8Bytes(), toScore.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.ZUnionStoreAsync(string intoSetId, string[] setIds, CancellationToken token) + { + var setIdsList = new List(setIds); + setIdsList.Insert(0, setIds.Length.ToString()); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZUnionStore, setIdsList.ToArray()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.ZInterStoreAsync(string intoSetId, string[] setIds, CancellationToken token) + { + var setIdsList = new List(setIds); + setIdsList.Insert(0, setIds.Length.ToString()); + setIdsList.Insert(0, intoSetId); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZInterStore, setIdsList.ToArray()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + internal ValueTask ZInterStoreAsync(string intoSetId, string[] setIds, string[] args, CancellationToken token) + { + var totalArgs = new 
List(setIds); + totalArgs.Insert(0, setIds.Length.ToString()); + totalArgs.Insert(0, intoSetId); + totalArgs.AddRange(args); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZInterStore, totalArgs.ToArray()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + internal ValueTask ZUnionStoreAsync(string intoSetId, string[] setIds, string[] args, CancellationToken token) + { + var totalArgs = new List(setIds); + totalArgs.Insert(0, setIds.Length.ToString()); + totalArgs.Insert(0, intoSetId); + totalArgs.AddRange(args); + + var cmdWithArgs = MergeCommandWithArgs(Commands.ZUnionStore, totalArgs.ToArray()); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.HMSetAsync(string hashId, byte[][] keys, byte[][] values, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + var cmdArgs = MergeCommandWithKeysAndValues(Commands.HMSet, hashId.ToUtf8Bytes(), keys, values); + return SendExpectSuccessAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.HSetNXAsync(string hashId, byte[] key, byte[] value, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectLongAsync(token, Commands.HSetNx, hashId.ToUtf8Bytes(), key, value); + } + + ValueTask IRedisNativeClientAsync.HIncrbyAsync(string hashId, byte[] key, int incrementBy, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectLongAsync(token, Commands.HIncrBy, hashId.ToUtf8Bytes(), key, incrementBy.ToString().ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HIncrbyFloatAsync(string hashId, byte[] key, double incrementBy, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectDoubleAsync(token, Commands.HIncrByFloat, hashId.ToUtf8Bytes(), key, incrementBy.ToString(CultureInfo.InvariantCulture).ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HGetAsync(string hashId, byte[] key, CancellationToken token) + => HGetAsync(hashId.ToUtf8Bytes(), key, token); + + private ValueTask HGetAsync(byte[] hashId, byte[] key, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectDataAsync(token, Commands.HGet, hashId, key); + } + + ValueTask IRedisNativeClientAsync.HMGetAsync(string hashId, byte[][] keys, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + if (keys.Length == 0) + throw new ArgumentNullException(nameof(keys)); + + var cmdArgs = MergeCommandWithArgs(Commands.HMGet, hashId.ToUtf8Bytes(), keys); + return SendExpectMultiDataAsync(token, cmdArgs); + } + + ValueTask IRedisNativeClientAsync.HDelAsync(string hashId, byte[] key, CancellationToken token) + => HDelAsync(hashId.ToUtf8Bytes(), key, token); + + private ValueTask HDelAsync(byte[] hashId, byte[] key, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectLongAsync(token, Commands.HDel, hashId, key); + } + + ValueTask IRedisNativeClientAsync.HExistsAsync(string hashId, byte[] key, CancellationToken token) + { + AssertHashIdAndKey(hashId, key); + return SendExpectLongAsync(token, Commands.HExists, hashId.ToUtf8Bytes(), key); + } + + ValueTask IRedisNativeClientAsync.HKeysAsync(string hashId, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + return SendExpectMultiDataAsync(token, Commands.HKeys, hashId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.HValsAsync(string hashId, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + return SendExpectMultiDataAsync(token, Commands.HVals, hashId.ToUtf8Bytes()); + 
} + + ValueTask IRedisNativeClientAsync.HGetAllAsync(string hashId, CancellationToken token) + { + AssertNotNull(hashId, nameof(hashId)); + return SendExpectMultiDataAsync(token, Commands.HGetAll, hashId.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.GeoAddAsync(string key, double longitude, double latitude, string member, CancellationToken token) + { + AssertNotNull(key, nameof(key)); + AssertNotNull(member, nameof(member)); + return SendExpectLongAsync(token, Commands.GeoAdd, key.ToUtf8Bytes(), longitude.ToUtf8Bytes(), latitude.ToUtf8Bytes(), member.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.GeoAddAsync(string key, RedisGeo[] geoPoints, CancellationToken token) + { + var cmdWithArgs = GeoAddPrepareArgs(key, geoPoints); + return SendExpectLongAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.GeoDistAsync(string key, string fromMember, string toMember, string unit, CancellationToken token) + { + AssertNotNull(key, nameof(key)); + + return unit == null + ? SendExpectDoubleAsync(token, Commands.GeoDist, key.ToUtf8Bytes(), fromMember.ToUtf8Bytes(), toMember.ToUtf8Bytes()) + : SendExpectDoubleAsync(token, Commands.GeoDist, key.ToUtf8Bytes(), fromMember.ToUtf8Bytes(), toMember.ToUtf8Bytes(), unit.ToUtf8Bytes()); + } + + async ValueTask IRedisNativeClientAsync.GeoHashAsync(string key, string[] members, CancellationToken token) + { + AssertNotNull(key, nameof(key)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.GeoHash, key.ToUtf8Bytes(), members.Map(x => x.ToUtf8Bytes()).ToArray()); + var result = await SendExpectMultiDataAsync(token, cmdWithArgs).ConfigureAwait(false); + return result.ToStringArray(); + } + + async ValueTask> IRedisNativeClientAsync.GeoPosAsync(string key, string[] members, CancellationToken token) + { + AssertNotNull(key, nameof(key)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.GeoPos, key.ToUtf8Bytes(), members.Map(x => x.ToUtf8Bytes()).ToArray()); + var data = await SendExpectComplexResponseAsync(token, cmdWithArgs).ConfigureAwait(false); + return GeoPosParseResult(members, data); + } + + async ValueTask> IRedisNativeClientAsync.GeoRadiusAsync(string key, double longitude, double latitude, double radius, string unit, bool withCoords, bool withDist, bool withHash, int? count, bool? asc, CancellationToken token) + { + var cmdWithArgs = GeoRadiusPrepareArgs(key, longitude, latitude, radius, unit, + withCoords, withDist, withHash, count, asc); + + var to = new List(); + + if (!(withCoords || withDist || withHash)) + { + var members = (await SendExpectMultiDataAsync(token, cmdWithArgs).ConfigureAwait(false)).ToStringArray(); + foreach (var member in members) + { + to.Add(new RedisGeoResult { Member = member }); + } + } + else + { + var data = await SendExpectComplexResponseAsync(token, cmdWithArgs).ConfigureAwait(false); + GetRadiusParseResult(unit, withCoords, withDist, withHash, to, data); + } + + return to; + } + + async ValueTask> IRedisNativeClientAsync.GeoRadiusByMemberAsync(string key, string member, double radius, string unit, bool withCoords, bool withDist, bool withHash, int? count, bool? 
asc, CancellationToken token) + { + var cmdWithArgs = GeoRadiusByMemberPrepareArgs(key, member, radius, unit, withCoords, withDist, withHash, count, asc); + + var to = new List(); + + if (!(withCoords || withDist || withHash)) + { + var members = (await SendExpectMultiDataAsync(token, cmdWithArgs).ConfigureAwait(false)).ToStringArray(); + foreach (var x in members) + { + to.Add(new RedisGeoResult { Member = x }); + } + } + else + { + var data = await SendExpectComplexResponseAsync(token, cmdWithArgs).ConfigureAwait(false); + GeoRadiusByMemberParseResult(unit, withCoords, withDist, withHash, to, data); + } + + return to; + } + + ValueTask IRedisNativeClientAsync.PublishAsync(string toChannel, byte[] message, CancellationToken token) + => SendExpectLongAsync(token, Commands.Publish, toChannel.ToUtf8Bytes(), message); + + ValueTask IRedisNativeClientAsync.SubscribeAsync(string[] toChannels, CancellationToken token) + { + if (toChannels.Length == 0) + throw new ArgumentNullException(nameof(toChannels)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.Subscribe, toChannels); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.UnSubscribeAsync(string[] fromChannels, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.UnSubscribe, fromChannels); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.PSubscribeAsync(string[] toChannelsMatchingPatterns, CancellationToken token) + { + if (toChannelsMatchingPatterns.Length == 0) + throw new ArgumentNullException(nameof(toChannelsMatchingPatterns)); + + var cmdWithArgs = MergeCommandWithArgs(Commands.PSubscribe, toChannelsMatchingPatterns); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.PUnSubscribeAsync(string[] fromChannelsMatchingPatterns, CancellationToken token) + { + var cmdWithArgs = MergeCommandWithArgs(Commands.PUnSubscribe, fromChannelsMatchingPatterns); + return SendExpectMultiDataAsync(token, cmdWithArgs); + } + + ValueTask IRedisNativeClientAsync.ReceiveMessagesAsync(CancellationToken token) + => ReadMultiDataAsync(token); + + ValueTask IRedisNativeClientAsync.CreateSubscriptionAsync(CancellationToken token) + => new RedisSubscription(this).AsValueTaskResult(); + + ValueTask IRedisNativeClientAsync.BitCountAsync(string key, CancellationToken token) + { + AssertNotNull(key); + return SendExpectLongAsync(token, Commands.BitCount, key.ToUtf8Bytes()); + } + + ValueTask IRedisNativeClientAsync.DelAsync(params string[] keys) + => AsAsync().DelAsync(keys, default); + + ValueTask IRedisNativeClientAsync.SInterStoreAsync(string intoSetId, params string[] setIds) + => AsAsync().SInterStoreAsync(intoSetId, setIds, default); + + ValueTask IRedisNativeClientAsync.SUnionAsync(params string[] setIds) + => AsAsync().SUnionAsync(setIds, default); + + ValueTask IRedisNativeClientAsync.WatchAsync(params string[] keys) + => AsAsync().WatchAsync(keys, default); + + ValueTask IRedisNativeClientAsync.SubscribeAsync(params string[] toChannels) + => AsAsync().SubscribeAsync(toChannels, default); + + ValueTask IRedisNativeClientAsync.UnSubscribeAsync(params string[] toChannels) + => AsAsync().UnSubscribeAsync(toChannels, default); + + ValueTask IRedisNativeClientAsync.PSubscribeAsync(params string[] toChannelsMatchingPatterns) + => AsAsync().PSubscribeAsync(toChannelsMatchingPatterns, default); + + ValueTask IRedisNativeClientAsync.PUnSubscribeAsync(params string[] toChannelsMatchingPatterns) + => 
AsAsync().PUnSubscribeAsync(toChannelsMatchingPatterns, default); + + ValueTask IRedisNativeClientAsync.SInterAsync(params string[] setIds) + => AsAsync().SInterAsync(setIds, default); + + ValueTask IRedisNativeClientAsync.SDiffAsync(string fromSetId, params string[] withSetIds) + => AsAsync().SDiffAsync(fromSetId, withSetIds, default); + + ValueTask IRedisNativeClientAsync.SDiffStoreAsync(string intoSetId, string fromSetId, params string[] withSetIds) + => AsAsync().SDiffStoreAsync(intoSetId, fromSetId, withSetIds, default); + + ValueTask IRedisNativeClientAsync.ZUnionStoreAsync(string intoSetId, params string[] setIds) + => AsAsync().ZUnionStoreAsync(intoSetId, setIds, default); + + ValueTask IRedisNativeClientAsync.ZInterStoreAsync(string intoSetId, params string[] setIds) + => AsAsync().ZInterStoreAsync(intoSetId, setIds, default); + + ValueTask IRedisNativeClientAsync.EvalCommandAsync(string luaBody, int numberKeysInArgs, params byte[][] keys) + => AsAsync().EvalCommandAsync(luaBody, numberKeysInArgs, keys, default); + + ValueTask IRedisNativeClientAsync.EvalShaCommandAsync(string sha1, int numberKeysInArgs, params byte[][] keys) + => AsAsync().EvalShaCommandAsync(sha1, numberKeysInArgs, keys, default); + + ValueTask IRedisNativeClientAsync.EvalAsync(string luaBody, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalAsync(luaBody, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalShaAsync(string sha1, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalShaAsync(sha1, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalIntAsync(string luaBody, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalIntAsync(luaBody, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalShaIntAsync(string sha1, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalShaIntAsync(sha1, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalStrAsync(string luaBody, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalStrAsync(luaBody, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.EvalShaStrAsync(string sha1, int numberOfKeys, params byte[][] keysAndArgs) + => AsAsync().EvalShaStrAsync(sha1, numberOfKeys, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.RawCommandAsync(params object[] cmdWithArgs) + => AsAsync().RawCommandAsync(cmdWithArgs, default); + + ValueTask IRedisNativeClientAsync.RawCommandAsync(params byte[][] cmdWithBinaryArgs) + => AsAsync().RawCommandAsync(cmdWithBinaryArgs, default); + + ValueTask IRedisNativeClientAsync.MGetAsync(params string[] keys) + => AsAsync().MGetAsync(keys, default); + + ValueTask IRedisNativeClientAsync.PfAddAsync(string key, params byte[][] elements) + => AsAsync().PfAddAsync(key, elements, default); + + ValueTask IRedisNativeClientAsync.HMGetAsync(string hashId, params byte[][] keysAndArgs) + => AsAsync().HMGetAsync(hashId, keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.MGetAsync(params byte[][] keysAndArgs) + => AsAsync().MGetAsync(keysAndArgs, default); + + ValueTask IRedisNativeClientAsync.SUnionStoreAsync(string intoSetId, params string[] setIds) + => AsAsync().SUnionStoreAsync(intoSetId, setIds, default); + + ValueTask IRedisNativeClientAsync.ScriptExistsAsync(params byte[][] sha1Refs) + => AsAsync().ScriptExistsAsync(sha1Refs, default); + + ValueTask IRedisNativeClientAsync.PfMergeAsync(string toKeyId, params string[] fromKeys) + => 
AsAsync().PfMergeAsync(toKeyId, fromKeys, default); + + ValueTask IRedisNativeClientAsync.GeoAddAsync(string key, params RedisGeo[] geoPoints) + => AsAsync().GeoAddAsync(key, geoPoints, default); + + ValueTask IRedisNativeClientAsync.GeoHashAsync(string key, params string[] members) + => AsAsync().GeoHashAsync(key, members, default); + + ValueTask> IRedisNativeClientAsync.GeoPosAsync(string key, params string[] members) + => AsAsync().GeoPosAsync(key, members, default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisNativeClient.cs b/src/ServiceStack.Redis/RedisNativeClient.cs index 86cf8022..220f0405 100644 --- a/src/ServiceStack.Redis/RedisNativeClient.cs +++ b/src/ServiceStack.Redis/RedisNativeClient.cs @@ -21,14 +21,19 @@ using ServiceStack.Logging; using ServiceStack.Redis.Pipeline; using ServiceStack.Text; +using System.Security.Authentication; namespace ServiceStack.Redis { /// /// This class contains all the common operations for the RedisClient. /// The client contains a 1:1 mapping of c# methods to redis operations of the same name. - /// - /// Not threadsafe use a pooled manager + /// + /// Not thread-safe, use a pooled manager! + /// All redis calls on a single instance write to the same Socket. + /// If used in multiple threads (or async Tasks) at the same time you will find + /// that commands are not executed properly by Redis and ServiceStack won't be able to (JSON) serialize + /// the data that comes back. /// public partial class RedisNativeClient : IRedisNativeClient @@ -46,25 +51,25 @@ public partial class RedisNativeClient internal long deactivatedAtTicks; public DateTime? DeactivatedAt { - get - { - return deactivatedAtTicks != 0 - ? new DateTime(Interlocked.Read(ref deactivatedAtTicks), DateTimeKind.Utc) - : (DateTime?)null; - } + get => deactivatedAtTicks != 0 + ? new DateTime(Interlocked.Read(ref deactivatedAtTicks), DateTimeKind.Utc) + : (DateTime?)null; set { - var ticksValue = value == null ? 0 : value.Value.Ticks; + var ticksValue = value?.Ticks ?? 0; Interlocked.Exchange(ref deactivatedAtTicks, ticksValue); } } - public bool HadExceptions { get { return deactivatedAtTicks > 0; } } + public bool HadExceptions => deactivatedAtTicks > 0; protected Socket socket; + [Obsolete("The direct stream is no longer directly available", true)] // API BREAKING CHANGE since exposed protected BufferedStream Bstream; protected SslStream sslStream; + private BufferedReader bufferedReader; + private IRedisTransactionBase transaction; private IRedisPipelineShared pipeline; @@ -77,14 +82,8 @@ public DateTime? DeactivatedAt private int active; internal bool Active { - get - { - return Interlocked.CompareExchange(ref active, 0, 0) == YES; - } - set - { - Interlocked.Exchange(ref active, value ? YES : NO); - } + get => Interlocked.CompareExchange(ref active, 0, 0) == YES; + private set => Interlocked.Exchange(ref active, value ? YES : NO); } internal IHandleClientDispose ClientManager { get; set; } @@ -96,6 +95,7 @@ internal bool Active public string Host { get; private set; } public int Port { get; private set; } public bool Ssl { get; private set; } + public SslProtocols? SslProtocols { get; private set; } /// /// Gets or sets object key prefix. 
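Editorial note on the thread-safety remarks added in the hunk above: the pattern the comment recommends is to resolve a short-lived client from a pooled manager per unit of work instead of sharing one client instance (and its socket) across threads or async Tasks. A minimal sketch of that usage follows; it is not part of this patch, and the `RedisManagerPool` registration and `localhost:6379` address are illustrative assumptions.

```csharp
// Illustrative only (not part of the diff): one pooled manager per application,
// one client per thread/Task, returned to the pool when disposed.
using ServiceStack.Redis;

public static class PooledUsageSketch
{
    // Assumed connection string; any supported connection string format works here
    static readonly IRedisClientsManager Manager = new RedisManagerPool("localhost:6379");

    public static void Run()
    {
        // Each caller takes its own client, so no two threads write to the same socket
        using (var redis = Manager.GetClient())
        {
            redis.SetValue("greeting", "hello");
            var value = redis.GetValue("greeting");
        }
    }
}
```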
@@ -105,8 +105,8 @@ internal bool Active private TimeSpan retryTimeout; public int RetryTimeout { - get { return (int) retryTimeout.TotalMilliseconds; } - set { retryTimeout = TimeSpan.FromMilliseconds(value); } + get => (int)retryTimeout.TotalMilliseconds; + set => retryTimeout = TimeSpan.FromMilliseconds(value); } public int RetryCount { get; set; } public int SendTimeout { get; set; } @@ -119,10 +119,7 @@ public int RetryTimeout internal IRedisTransactionBase Transaction { - get - { - return transaction; - } + get => transaction; set { if (value != null) @@ -133,10 +130,7 @@ internal IRedisTransactionBase Transaction internal IRedisPipelineShared Pipeline { - get - { - return pipeline; - } + get => pipeline; set { if (value != null) @@ -188,8 +182,10 @@ private void Init(RedisEndpoint config) Client = config.Client; Db = config.Db; Ssl = config.Ssl; + SslProtocols = config.SslProtocols; IdleTimeOutSecs = config.IdleTimeOutSecs; ServerVersionNumber = RedisConfig.AssumeServerVersion.GetValueOrDefault(); + LogPrefix = "#" + ClientId + " "; JsConfig.InitStatics(); } @@ -201,10 +197,7 @@ public RedisNativeClient() long db; public long Db { - get - { - return db; - } + get => db; set { @@ -217,19 +210,14 @@ public long Db } } + public void ChangeDb(long db) { this.db = db; SendExpectSuccess(Commands.Select, db.ToUtf8Bytes()); } - public long DbSize - { - get - { - return SendExpectLong(Commands.DbSize); - } - } + public long DbSize => SendExpectLong(Commands.DbSize); public DateTime LastSave { @@ -242,35 +230,40 @@ public DateTime LastSave public Dictionary Info { - get - { - var lines = SendExpectString(Commands.Info); - var info = new Dictionary(); + get => ParseInfoResult(SendExpectString(Commands.Info)); + } - foreach (var line in lines - .Split(new[] { "\r\n" }, StringSplitOptions.RemoveEmptyEntries)) - { - var p = line.IndexOf(':'); - if (p == -1) continue; + private static Dictionary ParseInfoResult(string lines) + { + var info = new Dictionary(); - info.Add(line.Substring(0, p), line.Substring(p + 1)); - } + foreach (var line in lines + .Split(new[] { "\r\n" }, StringSplitOptions.RemoveEmptyEntries)) + { + var p = line.IndexOf(':'); + if (p == -1) continue; - return info; + info[line.Substring(0, p)] = line.Substring(p + 1); } + + return info; } public string ServerVersion { get { - string version; - this.Info.TryGetValue("redis_version", out version); + this.Info.TryGetValue("redis_version", out var version); return version; } } public RedisData RawCommand(params object[] cmdWithArgs) + { + return SendExpectComplexResponse(PrepareRawCommand(cmdWithArgs)); + } + + private static byte[][] PrepareRawCommand(object[] cmdWithArgs) { var byteArgs = new List(); @@ -282,8 +275,7 @@ public RedisData RawCommand(params object[] cmdWithArgs) continue; } - var bytes = arg as byte[]; - if (bytes != null) + if (arg is byte[] bytes) { byteArgs.Add(bytes); } @@ -298,9 +290,7 @@ public RedisData RawCommand(params object[] cmdWithArgs) byteArgs.Add(str.ToUtf8Bytes()); } } - - var data = SendExpectComplexResponse(byteArgs.ToArray()); - return data; + return byteArgs.ToArray(); } public RedisData RawCommand(params byte[][] cmdWithBinaryArgs) @@ -412,8 +402,11 @@ public string Type(string key) } public RedisKeyType GetEntryType(string key) + => ParseEntryType(Type(key)); + + private protected RedisKeyType ParseEntryType(string type) { - switch (Type(key)) + switch (type) { case "none": return RedisKeyType.None; @@ -428,7 +421,7 @@ public RedisKeyType GetEntryType(string key) case "hash": return 
RedisKeyType.Hash; } - throw CreateResponseError("Invalid value"); + throw CreateResponseError($"Invalid Type '{type}'"); } public long StrLen(string key) @@ -590,6 +583,12 @@ public byte[] GetBytes(string key) } public byte[] GetSet(string key, byte[] value) + { + GetSetAssertArgs(key, ref value); + return SendExpectData(Commands.GetSet, key.ToUtf8Bytes(), value); + } + + private static void GetSetAssertArgs(string key, ref byte[] value) { if (key == null) throw new ArgumentNullException("key"); @@ -598,8 +597,6 @@ public byte[] GetSet(string key, byte[] value) if (value.Length > OneGb) throw new ArgumentException("value exceeds 1G", "value"); - - return SendExpectData(Commands.GetSet, key.ToUtf8Bytes(), value); } public long Exists(string key) @@ -737,21 +734,21 @@ public string RandomKey() public void Rename(string oldKeyname, string newKeyname) { - if (oldKeyname == null) - throw new ArgumentNullException("oldKeyname"); - if (newKeyname == null) - throw new ArgumentNullException("newKeyname"); - + CheckRenameKeys(oldKeyname, newKeyname); SendExpectSuccess(Commands.Rename, oldKeyname.ToUtf8Bytes(), newKeyname.ToUtf8Bytes()); } - public bool RenameNx(string oldKeyname, string newKeyname) + private protected static void CheckRenameKeys(string oldKeyname, string newKeyname) { if (oldKeyname == null) throw new ArgumentNullException("oldKeyname"); if (newKeyname == null) throw new ArgumentNullException("newKeyname"); + } + public bool RenameNx(string oldKeyname, string newKeyname) + { + CheckRenameKeys(oldKeyname, newKeyname); return SendExpectLong(Commands.RenameNx, oldKeyname.ToUtf8Bytes(), newKeyname.ToUtf8Bytes()) == Success; } @@ -869,14 +866,18 @@ public string ClientGetName() } public void ClientSetName(string name) + { + ClientValidateName(name); + SendExpectSuccess(Commands.Client, Commands.SetName, name.ToUtf8Bytes()); + } + + private static void ClientValidateName(string name) { if (string.IsNullOrEmpty(name)) throw new ArgumentException("Name cannot be null or empty"); if (name.Contains(" ")) throw new ArgumentException("Name cannot contain spaces"); - - SendExpectSuccess(Commands.Client, Commands.SetName, name.ToUtf8Bytes()); } public void ClientPause(int timeOutMs) @@ -895,11 +896,16 @@ public void ClientKill(string clientAddr) } public long ClientKill(string addr = null, string id = null, string type = null, string skipMe = null) + { + return SendExpectLong(ClientKillPrepareArgs(addr, id, type, skipMe)); + } + + static byte[][] ClientKillPrepareArgs(string addr, string id, string type, string skipMe) { var cmdWithArgs = new List - { - Commands.Client, Commands.Kill, - }; + { + Commands.Client, Commands.Kill, + }; if (addr != null) { @@ -924,8 +930,7 @@ public long ClientKill(string addr = null, string id = null, string type = null, cmdWithArgs.Add(Commands.SkipMe); cmdWithArgs.Add(skipMe.ToUtf8Bytes()); } - - return SendExpectLong(cmdWithArgs.ToArray()); + return cmdWithArgs.ToArray(); } public byte[][] Keys(string pattern) @@ -937,15 +942,18 @@ public byte[][] Keys(string pattern) } public byte[][] MGet(params byte[][] keys) + { + return SendExpectMultiData(MGetPrepareArgs(keys)); + } + + private static byte[][] MGetPrepareArgs(byte[][] keys) { if (keys == null) throw new ArgumentNullException("keys"); if (keys.Length == 0) throw new ArgumentException("keys"); - var cmdWithArgs = MergeCommandWithArgs(Commands.MGet, keys); - - return SendExpectMultiData(cmdWithArgs); + return MergeCommandWithArgs(Commands.MGet, keys); } public byte[][] MGet(params string[] keys) @@ -1055,6 
+1063,10 @@ internal ScanResult SendExpectScanResult(byte[] cmd, params byte[][] args) { var cmdWithArgs = MergeCommandWithArgs(cmd, args); var multiData = SendExpectDeeplyNestedMultiData(cmdWithArgs); + return ParseScanResult(multiData); + } + internal static ScanResult ParseScanResult(object[] multiData) + { var counterBytes = (byte[])multiData[0]; var ret = new ScanResult @@ -1256,11 +1268,16 @@ public byte[][] LRange(string listId, int startingFrom, int endingAt) } public byte[][] Sort(string listOrSetId, SortOptions sortOptions) + { + return SendExpectMultiData(SortPrepareArgs(listOrSetId, sortOptions)); + } + + private static byte[][] SortPrepareArgs(string listOrSetId, SortOptions sortOptions) { var cmdWithArgs = new List - { - Commands.Sort, listOrSetId.ToUtf8Bytes() - }; + { + Commands.Sort, listOrSetId.ToUtf8Bytes() + }; if (sortOptions.SortPattern != null) { @@ -1296,8 +1313,7 @@ public byte[][] Sort(string listOrSetId, SortOptions sortOptions) cmdWithArgs.Add(Commands.Store); cmdWithArgs.Add(sortOptions.StoreAtKey.ToUtf8Bytes()); } - - return SendExpectMultiData(cmdWithArgs.ToArray()); + return cmdWithArgs.ToArray(); } public long RPush(string listId, byte[] value) @@ -1432,8 +1448,7 @@ public byte[][] BLPop(string[] listIds, int timeOutSecs) { if (listIds == null) throw new ArgumentNullException("listIds"); - var args = new List(); - args.Add(Commands.BLPop); + var args = new List { Commands.BLPop }; args.AddRange(listIds.Select(listId => listId.ToUtf8Bytes())); args.Add(timeOutSecs.ToUtf8Bytes()); return SendExpectMultiData(args.ToArray()); @@ -1467,8 +1482,7 @@ public byte[][] BRPop(string[] listIds, int timeOutSecs) { if (listIds == null) throw new ArgumentNullException("listIds"); - var args = new List(); - args.Add(Commands.BRPop); + var args = new List { Commands.BRPop }; args.AddRange(listIds.Select(listId => listId.ToUtf8Bytes())); args.Add(timeOutSecs.ToUtf8Bytes()); return SendExpectMultiData(args.ToArray()); @@ -1493,9 +1507,9 @@ public byte[][] BRPopValue(string[] listIds, int timeOutSecs) public byte[] RPopLPush(string fromListId, string toListId) { if (fromListId == null) - throw new ArgumentNullException("fromListId"); + throw new ArgumentNullException(nameof(fromListId)); if (toListId == null) - throw new ArgumentNullException("toListId"); + throw new ArgumentNullException(nameof(toListId)); return SendExpectData(Commands.RPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes()); } @@ -1503,9 +1517,9 @@ public byte[] RPopLPush(string fromListId, string toListId) public byte[] BRPopLPush(string fromListId, string toListId, int timeOutSecs) { if (fromListId == null) - throw new ArgumentNullException("fromListId"); + throw new ArgumentNullException(nameof(fromListId)); if (toListId == null) - throw new ArgumentNullException("toListId"); + throw new ArgumentNullException(nameof(toListId)); byte[][] result = SendExpectMultiData(Commands.BRPopLPush, fromListId.ToUtf8Bytes(), toListId.ToUtf8Bytes(), timeOutSecs.ToUtf8Bytes()); return result.Length == 0 ? 
null : result[1]; @@ -1697,21 +1711,26 @@ public long ZRevRank(string setId, byte[] value) } private byte[][] GetRange(byte[] commandBytes, string setId, int min, int max, bool withScores) + { + var args = GetRangeArgs(commandBytes, setId, min, max, withScores); + return SendExpectMultiData(args); + } + + private static byte[][] GetRangeArgs(byte[] commandBytes, string setId, int min, int max, bool withScores) { if (string.IsNullOrEmpty(setId)) throw new ArgumentNullException("setId"); var cmdWithArgs = new List - { - commandBytes, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() - }; + { + commandBytes, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() + }; if (withScores) { cmdWithArgs.Add(Commands.WithScores); } - - return SendExpectMultiData(cmdWithArgs.ToArray()); + return cmdWithArgs.ToArray(); } public byte[][] ZRange(string setId, int min, int max) @@ -1736,14 +1755,21 @@ public byte[][] ZRevRangeWithScores(string setId, int min, int max) private byte[][] GetRangeByScore(byte[] commandBytes, string setId, double min, double max, int? skip, int? take, bool withScores) + { + var args = GetRangeByScoreArgs(commandBytes, setId, min, max, skip, take, withScores); + return SendExpectMultiData(args); + } + + private static byte[][] GetRangeByScoreArgs(byte[] commandBytes, + string setId, double min, double max, int? skip, int? take, bool withScores) { if (setId == null) throw new ArgumentNullException("setId"); var cmdWithArgs = new List - { - commandBytes, setId.ToUtf8Bytes(), min.ToFastUtf8Bytes(), max.ToFastUtf8Bytes() - }; + { + commandBytes, setId.ToUtf8Bytes(), min.ToFastUtf8Bytes(), max.ToFastUtf8Bytes() + }; if (skip.HasValue || take.HasValue) { @@ -1756,34 +1782,14 @@ private byte[][] GetRangeByScore(byte[] commandBytes, { cmdWithArgs.Add(Commands.WithScores); } - - return SendExpectMultiData(cmdWithArgs.ToArray()); + return cmdWithArgs.ToArray(); } private byte[][] GetRangeByScore(byte[] commandBytes, string setId, long min, long max, int? skip, int? take, bool withScores) { - if (setId == null) - throw new ArgumentNullException("setId"); - - var cmdWithArgs = new List - { - commandBytes, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() - }; - - if (skip.HasValue || take.HasValue) - { - cmdWithArgs.Add(Commands.Limit); - cmdWithArgs.Add(skip.GetValueOrDefault(0).ToUtf8Bytes()); - cmdWithArgs.Add(take.GetValueOrDefault(0).ToUtf8Bytes()); - } - - if (withScores) - { - cmdWithArgs.Add(Commands.WithScores); - } - - return SendExpectMultiData(cmdWithArgs.ToArray()); + var args = GetRangeByScoreArgs(commandBytes, setId, min, max, skip, take, withScores); + return SendExpectMultiData(args); } public byte[][] ZRangeByScore(string setId, double min, double max, int? skip, int? take) @@ -1931,15 +1937,15 @@ public long ZInterStore(string intoSetId, string[] setIds, string[] args) return SendExpectLong(cmdWithArgs); } - public byte[][] ZRangeByLex(string setId, string min, string max, int? skip = null, int? take = null) + static byte[][] GetZRangeByLexArgs(string setId, string min, string max, int? skip, int? take) { if (setId == null) throw new ArgumentNullException("setId"); var cmdWithArgs = new List - { - Commands.ZRangeByLex, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() - }; + { + Commands.ZRangeByLex, setId.ToUtf8Bytes(), min.ToUtf8Bytes(), max.ToUtf8Bytes() + }; if (skip.HasValue || take.HasValue) { @@ -1947,9 +1953,10 @@ public byte[][] ZRangeByLex(string setId, string min, string max, int? 
skip = nu cmdWithArgs.Add(skip.GetValueOrDefault(0).ToUtf8Bytes()); cmdWithArgs.Add(take.GetValueOrDefault(0).ToUtf8Bytes()); } - - return SendExpectMultiData(cmdWithArgs.ToArray()); + return cmdWithArgs.ToArray(); } + public byte[][] ZRangeByLex(string setId, string min, string max, int? skip = null, int? take = null) + => SendExpectMultiData(GetZRangeByLexArgs(setId, min, max, skip, take)); public long ZLexCount(string setId, string min, string max) { @@ -1977,9 +1984,9 @@ public long ZRemRangeByLex(string setId, string min, string max) private static void AssertHashIdAndKey(object hashId, byte[] key) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); } public long HSet(string hashId, byte[] key, byte[] value) @@ -2004,7 +2011,7 @@ public long HSetNX(string hashId, byte[] key, byte[] value) public void HMSet(string hashId, byte[][] keys, byte[][] values) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); var cmdArgs = MergeCommandWithKeysAndValues(Commands.HMSet, hashId.ToUtf8Bytes(), keys, values); @@ -2047,9 +2054,9 @@ public byte[] HGet(byte[] hashId, byte[] key) public byte[][] HMGet(string hashId, params byte[][] keys) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); if (keys.Length == 0) - throw new ArgumentNullException("keys"); + throw new ArgumentNullException(nameof(keys)); var cmdArgs = MergeCommandWithArgs(Commands.HMGet, hashId.ToUtf8Bytes(), keys); @@ -2071,11 +2078,11 @@ public long HDel(byte[] hashId, byte[] key) public long HDel(string hashId, byte[][] keys) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); if (keys == null) - throw new ArgumentNullException("keys"); + throw new ArgumentNullException(nameof(keys)); if (keys.Length == 0) - throw new ArgumentException("keys"); + throw new ArgumentException(nameof(keys)); var cmdWithArgs = MergeCommandWithArgs(Commands.HDel, hashId.ToUtf8Bytes(), keys); return SendExpectLong(cmdWithArgs); @@ -2090,7 +2097,7 @@ public long HExists(string hashId, byte[] key) public long HLen(string hashId) { if (string.IsNullOrEmpty(hashId)) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); return SendExpectLong(Commands.HLen, hashId.ToUtf8Bytes()); } @@ -2098,7 +2105,7 @@ public long HLen(string hashId) public byte[][] HKeys(string hashId) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); return SendExpectMultiData(Commands.HKeys, hashId.ToUtf8Bytes()); } @@ -2106,7 +2113,7 @@ public byte[][] HKeys(string hashId) public byte[][] HVals(string hashId) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); return SendExpectMultiData(Commands.HVals, hashId.ToUtf8Bytes()); } @@ -2114,7 +2121,7 @@ public byte[][] HVals(string hashId) public byte[][] HGetAll(string hashId) { if (hashId == null) - throw new ArgumentNullException("hashId"); + throw new ArgumentNullException(nameof(hashId)); return SendExpectMultiData(Commands.HGetAll, hashId.ToUtf8Bytes()); } @@ -2129,7 +2136,7 @@ public byte[][] ReceiveMessages() return ReadMultiData(); } - public IRedisSubscription CreateSubscription() + public virtual 
IRedisSubscription CreateSubscription() { return new RedisSubscription(this); } @@ -2137,7 +2144,7 @@ public IRedisSubscription CreateSubscription() public byte[][] Subscribe(params string[] toChannels) { if (toChannels.Length == 0) - throw new ArgumentNullException("toChannels"); + throw new ArgumentNullException(nameof(toChannels)); var cmdWithArgs = MergeCommandWithArgs(Commands.Subscribe, toChannels); return SendExpectMultiData(cmdWithArgs); @@ -2152,7 +2159,7 @@ public byte[][] UnSubscribe(params string[] fromChannels) public byte[][] PSubscribe(params string[] toChannelsMatchingPatterns) { if (toChannelsMatchingPatterns.Length == 0) - throw new ArgumentNullException("toChannelsMatchingPatterns"); + throw new ArgumentNullException(nameof(toChannelsMatchingPatterns)); var cmdWithArgs = MergeCommandWithArgs(Commands.PSubscribe, toChannelsMatchingPatterns); return SendExpectMultiData(cmdWithArgs); @@ -2177,17 +2184,23 @@ public RedisPipelineCommand CreatePipelineCommand() public long GeoAdd(string key, double longitude, double latitude, string member) { if (key == null) - throw new ArgumentNullException("key"); - if (key == null) - throw new ArgumentNullException("member"); + throw new ArgumentNullException(nameof(key)); + if (member == null) + throw new ArgumentNullException(nameof(member)); return SendExpectLong(Commands.GeoAdd, key.ToUtf8Bytes(), longitude.ToUtf8Bytes(), latitude.ToUtf8Bytes(), member.ToUtf8Bytes()); } public long GeoAdd(string key, params RedisGeo[] geoPoints) + { + var cmdWithArgs = GeoAddPrepareArgs(key, geoPoints); + return SendExpectLong(cmdWithArgs); + } + + private static byte[][] GeoAddPrepareArgs(string key, RedisGeo[] geoPoints) { if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); var members = new byte[geoPoints.Length * 3][]; for (var i = 0; i < geoPoints.Length; i++) @@ -2198,14 +2211,13 @@ public long GeoAdd(string key, params RedisGeo[] geoPoints) members[i * 3 + 2] = geoPoint.Member.ToUtf8Bytes(); } - var cmdWithArgs = MergeCommandWithArgs(Commands.GeoAdd, key.ToUtf8Bytes(), members); - return SendExpectLong(cmdWithArgs); + return MergeCommandWithArgs(Commands.GeoAdd, key.ToUtf8Bytes(), members); } public double GeoDist(string key, string fromMember, string toMember, string unit = null) { if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); return unit == null ? 
SendExpectDouble(Commands.GeoDist, key.ToUtf8Bytes(), fromMember.ToUtf8Bytes(), toMember.ToUtf8Bytes()) @@ -2215,7 +2227,7 @@ public double GeoDist(string key, string fromMember, string toMember, string uni public string[] GeoHash(string key, params string[] members) { if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); var cmdWithArgs = MergeCommandWithArgs(Commands.GeoHash, key.ToUtf8Bytes(), members.Map(x => x.ToUtf8Bytes()).ToArray()); return SendExpectMultiData(cmdWithArgs).ToStringArray(); @@ -2224,10 +2236,14 @@ public string[] GeoHash(string key, params string[] members) public List GeoPos(string key, params string[] members) { if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); var cmdWithArgs = MergeCommandWithArgs(Commands.GeoPos, key.ToUtf8Bytes(), members.Map(x => x.ToUtf8Bytes()).ToArray()); var data = SendExpectComplexResponse(cmdWithArgs); + return GeoPosParseResult(members, data); + } + private static List GeoPosParseResult(string[] members, RedisData data) + { var to = new List(); for (var i = 0; i < members.Length; i++) @@ -2236,13 +2252,15 @@ public List GeoPos(string key, params string[] members) break; var entry = data.Children[i]; - if (entry.Children.Count == 0) + + var children = entry.Children; + if (children.Count == 0) continue; to.Add(new RedisGeo { - Longitude = double.Parse(entry.Children[0].Data.FromUtf8Bytes()), - Latitude = double.Parse(entry.Children[1].Data.FromUtf8Bytes()), + Longitude = children[0].ToDouble(), + Latitude = children[1].ToDouble(), Member = members[i], }); } @@ -2252,9 +2270,56 @@ public List GeoPos(string key, params string[] members) public List GeoRadius(string key, double longitude, double latitude, double radius, string unit, bool withCoords = false, bool withDist = false, bool withHash = false, int? count = null, bool? asc = null) + { + var cmdWithArgs = GeoRadiusPrepareArgs(key, longitude, latitude, radius, unit, + withCoords, withDist, withHash, count, asc); + + var to = new List(); + + if (!(withCoords || withDist || withHash)) + { + var members = SendExpectMultiData(cmdWithArgs).ToStringArray(); + foreach (var member in members) + { + to.Add(new RedisGeoResult { Member = member }); + } + } + else + { + var data = SendExpectComplexResponse(cmdWithArgs); + GetRadiusParseResult(unit, withCoords, withDist, withHash, to, data); + } + + return to; + } + + private static void GetRadiusParseResult(string unit, bool withCoords, bool withDist, bool withHash, List to, RedisData data) + { + foreach (var child in data.Children) + { + var i = 0; + var result = new RedisGeoResult { Unit = unit, Member = child.Children[i++].Data.FromUtf8Bytes() }; + + if (withDist) result.Distance = child.Children[i++].ToDouble(); + + if (withHash) result.Hash = child.Children[i++].ToInt64(); + + if (withCoords) + { + var children = child.Children[i].Children; + result.Longitude = children[0].ToDouble(); + result.Latitude = children[1].ToDouble(); + } + + to.Add(result); + } + } + + private static byte[][] GeoRadiusPrepareArgs(string key, double longitude, double latitude, double radius, string unit, + bool withCoords, bool withDist, bool withHash, int? count, bool? 
asc) { if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); var args = new List { @@ -2282,51 +2347,60 @@ public List GeoRadius(string key, double longitude, double latit else if (asc == false) args.Add(Commands.Desc); - var cmdWithArgs = MergeCommandWithArgs(Commands.GeoRadius, key.ToUtf8Bytes(), args.ToArray()); + return MergeCommandWithArgs(Commands.GeoRadius, key.ToUtf8Bytes(), args.ToArray()); + } + + public List GeoRadiusByMember(string key, string member, double radius, string unit, + bool withCoords = false, bool withDist = false, bool withHash = false, int? count = null, bool? asc = null) + { + var cmdWithArgs = GeoRadiusByMemberPrepareArgs(key, member, radius, unit, withCoords, withDist, withHash, count, asc); var to = new List(); if (!(withCoords || withDist || withHash)) { var members = SendExpectMultiData(cmdWithArgs).ToStringArray(); - foreach (var member in members) + foreach (var x in members) { - to.Add(new RedisGeoResult { Member = member }); + to.Add(new RedisGeoResult { Member = x }); } } else { var data = SendExpectComplexResponse(cmdWithArgs); + GeoRadiusByMemberParseResult(unit, withCoords, withDist, withHash, to, data); + } - foreach (var child in data.Children) - { - var i = 0; - var result = new RedisGeoResult { Unit = unit, Member = child.Children[i++].Data.FromUtf8Bytes() }; + return to; + } - if (withDist) - result.Distance = double.Parse(child.Children[i++].Data.FromUtf8Bytes()); + private static void GeoRadiusByMemberParseResult(string unit, bool withCoords, bool withDist, bool withHash, List to, RedisData data) + { + foreach (var child in data.Children) + { + var i = 0; + var result = new RedisGeoResult { Unit = unit, Member = child.Children[i++].Data.FromUtf8Bytes() }; - if (withHash) - result.Hash = long.Parse(child.Children[i++].Data.FromUtf8Bytes()); + if (withDist) result.Distance = child.Children[i++].ToDouble(); - if (withCoords) - { - result.Longitude = double.Parse(child.Children[i].Children[0].Data.FromUtf8Bytes()); - result.Latitude = double.Parse(child.Children[i].Children[1].Data.FromUtf8Bytes()); - } + if (withHash) result.Hash = child.Children[i++].ToInt64(); - to.Add(result); + if (withCoords) + { + var children = child.Children[i].Children; + result.Longitude = children[0].ToDouble(); + result.Latitude = children[1].ToDouble(); } - } - return to; + to.Add(result); + } } - public List GeoRadiusByMember(string key, string member, double radius, string unit, - bool withCoords = false, bool withDist = false, bool withHash = false, int? count = null, bool? asc = null) + static byte[][] GeoRadiusByMemberPrepareArgs(string key, string member, double radius, string unit, + bool withCoords, bool withDist, bool withHash, int? count, bool? 
asc) { if (key == null) - throw new ArgumentNullException("key"); + throw new ArgumentNullException(nameof(key)); var args = new List { @@ -2353,54 +2427,14 @@ public List GeoRadiusByMember(string key, string member, double else if (asc == false) args.Add(Commands.Desc); - var cmdWithArgs = MergeCommandWithArgs(Commands.GeoRadiusByMember, key.ToUtf8Bytes(), args.ToArray()); - - var to = new List(); - - if (!(withCoords || withDist || withHash)) - { - var members = SendExpectMultiData(cmdWithArgs).ToStringArray(); - foreach (var x in members) - { - to.Add(new RedisGeoResult { Member = x }); - } - } - else - { - var data = SendExpectComplexResponse(cmdWithArgs); - - foreach (var child in data.Children) - { - var i = 0; - var result = new RedisGeoResult { Unit = unit, Member = child.Children[i++].Data.FromUtf8Bytes() }; - - if (withDist) - result.Distance = double.Parse(child.Children[i++].Data.FromUtf8Bytes()); - - if (withHash) - result.Hash = long.Parse(child.Children[i++].Data.FromUtf8Bytes()); - - if (withCoords) - { - result.Longitude = double.Parse(child.Children[i].Children[0].Data.FromUtf8Bytes()); - result.Latitude = double.Parse(child.Children[i].Children[1].Data.FromUtf8Bytes()); - } - - to.Add(result); - } - } - - return to; + return MergeCommandWithArgs(Commands.GeoRadiusByMember, key.ToUtf8Bytes(), args.ToArray()); } #endregion internal bool IsDisposed { get; set; } - public bool IsManagedClient - { - get { return ClientManager != null; } - } + public bool IsManagedClient => ClientManager != null; public virtual void Dispose() { @@ -2454,24 +2488,21 @@ private void SafeConnectionClose() try { // workaround for a .net bug: http://support.microsoft.com/kb/821625 - if (Bstream != null) - Bstream.Close(); + bufferedReader?.Close(); } catch { } try { - if (sslStream != null) - sslStream.Close(); + sslStream?.Close(); } catch { } try { - if (socket != null) - socket.Close(); + socket?.Close(); } catch { } - Bstream = null; + bufferedReader = null; sslStream = null; socket = null; } diff --git a/src/ServiceStack.Redis/RedisNativeClient_Utils.Async.cs b/src/ServiceStack.Redis/RedisNativeClient_Utils.Async.cs new file mode 100644 index 00000000..966dbbf1 --- /dev/null +++ b/src/ServiceStack.Redis/RedisNativeClient_Utils.Async.cs @@ -0,0 +1,552 @@ +using ServiceStack.Redis.Internal; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Text; +using ServiceStack.Text.Pools; +using System; +using System.Collections.Generic; +using System.IO; +using System.Net.Sockets; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisNativeClient + { + private async ValueTask SendExpectMultiDataAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return (await SendReceiveAsync(cmdWithBinaryArgs, ReadMultiDataAsync, token, + PipelineAsync != null ? PipelineAsync.CompleteMultiBytesQueuedCommandAsync : (Action>>)null).ConfigureAwait(false)) + ?? TypeConstants.EmptyByteArrayArray; + } + + protected ValueTask SendWithoutReadAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + => SendReceiveAsync(cmdWithBinaryArgs, null, token, null, sendWithoutRead: true).Await(); + + private ValueTask SendExpectLongAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ReadLongAsync, token, + PipelineAsync != null ? 
PipelineAsync.CompleteLongQueuedCommandAsync : (Action>>)null); + } + + private ValueTask SendExpectDoubleAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ReadDoubleAsync, token, + PipelineAsync != null ? PipelineAsync.CompleteDoubleQueuedCommandAsync : (Action>>)null); + } + protected ValueTask SendExpectStringAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + => SendExpectDataAsync(token, cmdWithBinaryArgs).FromUtf8BytesAsync(); + + private ValueTask SendExpectSuccessAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + //Turn Action into Func Hack + Action>> completePipelineFn = null; + if (PipelineAsync != null) completePipelineFn = f => { PipelineAsync.CompleteVoidQueuedCommandAsync(ct => f(ct).Await()); }; + + return SendReceiveAsync(cmdWithBinaryArgs, ExpectSuccessFnAsync, token, completePipelineFn).Await(); + } + + private ValueTask SendExpectDataAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ReadDataAsync, token, PipelineAsync != null ? PipelineAsync.CompleteBytesQueuedCommandAsync : (Action>>)null); + } + + private ValueTask SendExpectCodeAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ExpectCodeAsync, token, PipelineAsync != null ? PipelineAsync.CompleteStringQueuedCommandAsync : (Action>>)null); + } + + private ValueTask SendExpectScanResultAsync(CancellationToken token, byte[] cmd, params byte[][] args) + { + var cmdWithArgs = MergeCommandWithArgs(cmd, args); + return SendExpectDeeplyNestedMultiDataAsync(token, cmdWithArgs).Await(multiData => ParseScanResult(multiData)); + } + + private ValueTask SendExpectDeeplyNestedMultiDataAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + => SendReceiveAsync(cmdWithBinaryArgs, ReadDeeplyNestedMultiDataAsync, token); + + private ValueTask ReadDeeplyNestedMultiDataAsync(CancellationToken token) + => ReadDeeplyNestedMultiDataItemAsync(token).Await(result => (object[])result); + + private async ValueTask ReadDeeplyNestedMultiDataItemAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + if (log.IsDebugEnabled) + Log("R: {0}", s); + + switch (c) + { + case '$': + return await ParseSingleLineAsync(string.Concat(char.ToString((char)c), s), token).ConfigureAwait(false); + + case '-': + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + case '*': + if (int.TryParse(s, out var count)) + { + var array = new object[count]; + for (int i = 0; i < count; i++) + { + array[i] = await ReadDeeplyNestedMultiDataItemAsync(token).ConfigureAwait(false); + } + + return array; + } + break; + + default: + return s; + } + + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix + } + + protected ValueTask SendExpectComplexResponseAsync(CancellationToken token, params byte[][] cmdWithBinaryArgs) + { + return SendReceiveAsync(cmdWithBinaryArgs, ReadComplexResponseAsync, token, + PipelineAsync != null ? 
PipelineAsync.CompleteRedisDataQueuedCommandAsync : (Action>>)null); + } + + private async ValueTask ReadComplexResponseAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + if (log.IsDebugEnabled) + Log("R: {0}", s); + + switch (c) + { + case '$': + return new RedisData + { + Data = await ParseSingleLineAsync(string.Concat(char.ToString((char)c), s), token).ConfigureAwait(false) + }; + + case '-': + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + case '*': + if (int.TryParse(s, out var count)) + { + var ret = new RedisData { Children = new List() }; + for (var i = 0; i < count; i++) + { + ret.Children.Add(await ReadComplexResponseAsync(token).ConfigureAwait(false)); + } + + return ret; + } + break; + + default: + return new RedisData { Data = s.ToUtf8Bytes() }; + } + + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix + } + + private async ValueTask SendReceiveAsync(byte[][] cmdWithBinaryArgs, + Func> fn, + CancellationToken token, + Action>> completePipelineFn = null, + bool sendWithoutRead = false) + { + //if (TrackThread != null) + //{ + // if (TrackThread.Value.ThreadId != Thread.CurrentThread.ManagedThreadId) + // throw new InvalidAccessException(TrackThread.Value.ThreadId, TrackThread.Value.StackTrace); + //} + + var i = 0; + var didWriteToBuffer = false; + Exception originalEx = null; + + var firstAttempt = DateTime.UtcNow; + + while (true) + { + // this is deliberately *before* the try, so we never retry + // if we've been cancelled + token.ThrowIfCancellationRequested(); + try + { + if (TryConnectIfNeeded()) // TODO: asyncify + didWriteToBuffer = false; + + if (socket == null) + throw new RedisRetryableException("Socket is not connected"); + + if (!didWriteToBuffer) //only write to buffer once + { + WriteCommandToSendBuffer(cmdWithBinaryArgs); + didWriteToBuffer = true; + } + + if (PipelineAsync == null) //pipeline will handle flush if in pipeline + { + await FlushSendBufferAsync(token).ConfigureAwait(false); + } + else if (!sendWithoutRead) + { + if (completePipelineFn == null) + throw new NotSupportedException("Pipeline is not supported."); + + completePipelineFn(fn); + return default; + } + + var result = default(T); + if (fn != null) + result = await fn(token).ConfigureAwait(false); + + if (Pipeline == null) + ResetSendBuffer(); + + if (i > 0) + Interlocked.Increment(ref RedisState.TotalRetrySuccess); + + Interlocked.Increment(ref RedisState.TotalCommandsSent); + + return result; + } + catch (Exception outerEx) + { + if (log.IsDebugEnabled) + logDebug("SendReceive Exception: " + outerEx.Message); + + var retryableEx = outerEx as RedisRetryableException; + if (retryableEx == null && outerEx is RedisException + || outerEx is LicenseException) + { + ResetSendBuffer(); + throw; + } + + var ex = retryableEx ?? GetRetryableException(outerEx); + if (ex == null) + throw CreateConnectionError(originalEx ?? 
outerEx); + + if (originalEx == null) + originalEx = ex; + + var retry = DateTime.UtcNow - firstAttempt < retryTimeout; + if (!retry) + { + if (Pipeline == null) + ResetSendBuffer(); + + Interlocked.Increment(ref RedisState.TotalRetryTimedout); + throw CreateRetryTimeoutException(retryTimeout, originalEx); + } + + Interlocked.Increment(ref RedisState.TotalRetryCount); + await Task.Delay(GetBackOffMultiplier(++i), token).ConfigureAwait(false); + } + } + } + + internal ValueTask FlushSendBufferAsync(CancellationToken token) + { + if (currentBufferIndex > 0) + PushCurrentBuffer(); + + if (cmdBuffer.Count > 0) + { + OnBeforeFlush?.Invoke(); + + if (!Env.IsMono && sslStream == null) + { + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + { + var sb = StringBuilderCache.Allocate(); + foreach (var cmd in cmdBuffer) + { + if (sb.Length > 50) + break; + + sb.Append(Encoding.UTF8.GetString(cmd.Array, cmd.Offset, cmd.Count)); + } + logDebug("socket.Send: " + StringBuilderCache.ReturnAndFree(sb.Replace("\r\n", " ")).SafeSubstring(0, 50)); + } + + return new ValueTask(socket.SendAsync(cmdBuffer, SocketFlags.None)); + } + else + { + //Sending IList Throws 'Message to Large' SocketException in Mono + if (sslStream == null) + { + foreach (var segment in cmdBuffer) + { // TODO: what is modern Mono behavior here? + socket.Send(segment.Array, segment.Offset, segment.Count, SocketFlags.None); + } + } + else + { + return WriteAsync(sslStream, cmdBuffer, token); + } + } + } + + return default; + + static async ValueTask WriteAsync(Stream destination, List> buffer, CancellationToken token) + { + foreach (var segment in buffer) + { +#if ASYNC_MEMORY + await destination.WriteAsync(new ReadOnlyMemory(segment.Array, segment.Offset, segment.Count), token).ConfigureAwait(false); +#else + await destination.WriteAsync(segment.Array, segment.Offset, segment.Count, token).ConfigureAwait(false); +#endif + } + } + } + + + private ValueTask SafeReadByteAsync(in CancellationToken token, [CallerMemberName]string name = null) + { + AssertNotDisposed(); + + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + logDebug(name + "()"); + + return bufferedReader.ReadByteAsync(token); + } + + private async ValueTask ReadLineAsync(CancellationToken token) + { + AssertNotDisposed(); + + var sb = StringBuilderCache.Allocate(); + + int c; + while ((c = await bufferedReader.ReadByteAsync(token).ConfigureAwait(false)) != -1) + { + if (c == '\r') + continue; + if (c == '\n') + break; + sb.Append((char)c); + } + return StringBuilderCache.ReturnAndFree(sb); + } + + private async ValueTask ParseSingleLineAsync(string r, CancellationToken token) + { + if (log.IsDebugEnabled) + Log("R: {0}", r); + if (r.Length == 0) + throw CreateResponseError("Zero length response"); + + char c = r[0]; + if (c == '-') + throw CreateResponseError(r.StartsWith("-ERR") ? 
r.Substring(5) : r.Substring(1)); + + if (c == '$') + { + if (r == "$-1") + return null; + + if (int.TryParse(r.Substring(1), out var count)) + { + var retbuf = new byte[count]; + + var offset = 0; + while (count > 0) + { + var readCount = await bufferedReader.ReadAsync(retbuf, offset, count, token).ConfigureAwait(false); + if (readCount <= 0) + throw CreateResponseError("Unexpected end of Stream"); + + offset += readCount; + count -= readCount; + } + + if (await bufferedReader.ReadByteAsync(token).ConfigureAwait(false) != '\r' + || await bufferedReader.ReadByteAsync(token).ConfigureAwait(false) != '\n') + throw CreateResponseError("Invalid termination"); + + return retbuf; + } + throw CreateResponseError("Invalid length"); + } + + if (c == ':' || c == '+') + { + //match the return value + return r.Substring(1).ToUtf8Bytes(); + } + throw CreateResponseError("Unexpected reply: " + r); + } + + private ValueTask ReadDataAsync(CancellationToken token) + { + var pending = ReadLineAsync(token); + return pending.IsCompletedSuccessfully + ? ParseSingleLineAsync(pending.Result, token) + : Awaited(this, pending, token); + + static async ValueTask Awaited(RedisNativeClient @this, ValueTask pending, CancellationToken token) + { + var r = await pending.ConfigureAwait(false); + return await @this.ParseSingleLineAsync(r, token).ConfigureAwait(false); + } + } + + private async ValueTask ExpectCodeAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + + if (log.IsDebugEnabled) + Log((char)c + s); + + if (c == '-') + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + return s; + } + + private async ValueTask ReadMultiDataAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + if (log.IsDebugEnabled) + Log("R: {0}", s); + + switch (c) + { + // Some commands like BRPOPLPUSH may return Bulk Reply instead of Multi-bulk + case '$': + var t = new byte[2][]; + t[1] = await ParseSingleLineAsync(string.Concat(char.ToString((char)c), s), token).ConfigureAwait(false); + return t; + + case '-': + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + case '*': + if (int.TryParse(s, out var count)) + { + if (count == -1) + { + //redis is in an invalid state + return TypeConstants.EmptyByteArrayArray; + } + + var result = new byte[count][]; + + for (int i = 0; i < count; i++) + result[i] = await ReadDataAsync(token).ConfigureAwait(false); + + return result; + } + break; + } + + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix + } + + internal async ValueTask ReadLongAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + return ParseLong(c, await ReadLineAsync(token).ConfigureAwait(false)); + } + + private ValueTask ReadDoubleAsync(CancellationToken token) + => ReadDataAsync(token).Await(bytes => bytes == null ? 
double.NaN : ParseDouble(bytes)); + + internal ValueTask ExpectOkAsync(CancellationToken token) + => ExpectWordAsync(OK, token); + + internal ValueTask ExpectQueuedAsync(CancellationToken token) + => ExpectWordAsync(QUEUED, token); + + internal ValueTask ExpectSuccessFnAsync(CancellationToken token) + { + var pending = ExpectSuccessAsync(token); + return pending.IsCompletedSuccessfully ? default : Awaited(pending); + + static async ValueTask Awaited(ValueTask pending) + { + await pending.ConfigureAwait(false); + return 0; + } + } + + internal async ValueTask ExpectSuccessAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + + if (log.IsDebugEnabled) + Log((char)c + s); + + if (c == '-') + throw CreateResponseError(s.StartsWith("ERR") && s.Length >= 4 ? s.Substring(4) : s); + } + + + private async ValueTask ExpectWordAsync(string word, CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + + if (log.IsDebugEnabled) + Log((char)c + s); + + if (c == '-') + throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); + + if (s != word) + throw CreateResponseError($"Expected '{word}' got '{s}'"); + } + + internal async ValueTask ReadMultiDataResultCountAsync(CancellationToken token) + { + int c = await SafeReadByteAsync(token).ConfigureAwait(false); + if (c == -1) + throw CreateNoMoreDataError(); + + var s = await ReadLineAsync(token).ConfigureAwait(false); + if (log.IsDebugEnabled) + Log("R: {0}", s); + if (c == '-') + throw CreateResponseError(s.StartsWith("ERR") ? 
s.Substring(4) : s); + if (c == '*') + { + if (int.TryParse(s, out var count)) + { + return count; + } + } + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisNativeClient_Utils.cs b/src/ServiceStack.Redis/RedisNativeClient_Utils.cs index c1011a32..c8b5d19c 100644 --- a/src/ServiceStack.Redis/RedisNativeClient_Utils.cs +++ b/src/ServiceStack.Redis/RedisNativeClient_Utils.cs @@ -18,11 +18,15 @@ using System.Net; using System.Net.Security; using System.Net.Sockets; +using System.Runtime.CompilerServices; +using System.Security.Authentication; using System.Security.Cryptography; +using System.Security.Cryptography.X509Certificates; using System.Text; using System.Threading; using System.Threading.Tasks; using ServiceStack.Text; +using ServiceStack.Text.Pools; namespace ServiceStack.Redis { @@ -33,14 +37,19 @@ public partial class RedisNativeClient private static Timer UsageTimer; private static int __requestsPerHour = 0; - public static int RequestsPerHour - { - get { return __requestsPerHour; } - } + public static int RequestsPerHour => __requestsPerHour; private const int Unknown = -1; public static int ServerVersionNumber { get; set; } + private static long IdCounter = 0; + public long ClientId { get; } = Interlocked.Increment(ref IdCounter); + + private string LogPrefix = string.Empty; + private void logDebug(object message) => log.Debug(LogPrefix + message); + private void logError(object message) => log.Error(LogPrefix + message); + private void logError(object message, Exception ex) => log.Error(LogPrefix + message, ex); + public int AssertServerVersionNumber() { if (ServerVersionNumber == 0) @@ -81,31 +90,46 @@ private void Connect() SendTimeout = SendTimeout, ReceiveTimeout = ReceiveTimeout }; +#if DEBUG + // allow sync commands during connect (we're OK with sync for connect; the + // DebugAllowSync feature being used here only impacts tests) + var oldDebugAllowSync = DebugAllowSync; + DebugAllowSync = true; +#endif try { -#if NETSTANDARD1_3 - var addresses = Dns.GetHostAddressesAsync(Host).Result; - socket.Connect(addresses.FirstOrDefault(a => a.AddressFamily == AddressFamily.InterNetwork), Port); -#else + if (log.IsDebugEnabled) + { + var type = ConnectTimeout <= 0 ? "sync" : "async"; + logDebug($"Attempting {type} connection to '{Host}:{Port}' (SEND {SendTimeout}, RECV {ReceiveTimeout} Timeouts)..."); + } + if (ConnectTimeout <= 0) { socket.Connect(Host, Port); } else { - var connectResult = socket.BeginConnect(Host, Port, null, null); + var connectResult = IPAddress.TryParse(Host, out var ip) + ? 
socket.BeginConnect(ip, Port, null, null) + : socket.BeginConnect(Host, Port, null, null); connectResult.AsyncWaitHandle.WaitOne(ConnectTimeout, true); } -#endif if (!socket.Connected) { + if (log.IsDebugEnabled) + logDebug($"Socket failed connect to '{Host}:{Port}' (ConnectTimeout {ConnectTimeout})"); + socket.Close(); socket = null; DeactivatedAt = DateTime.UtcNow; return; } + if (log.IsDebugEnabled) + logDebug($"Socket connected to '{Host}:{Port}'"); + Stream networkStream = new NetworkStream(socket); if (Ssl) @@ -120,7 +144,14 @@ private void Connect() } else { - var ctor = typeof(SslStream).GetAllConstructors() +#if NETSTANDARD || NET472 + sslStream = new SslStream(networkStream, + leaveInnerStreamOpen: false, + userCertificateValidationCallback: RedisConfig.CertificateValidationCallback, + userCertificateSelectionCallback: RedisConfig.CertificateSelectionCallback, + encryptionPolicy: EncryptionPolicy.RequireEncryption); +#else + var ctor = typeof(SslStream).GetConstructors() .First(x => x.GetParameters().Length == 5); var policyType = AssemblyUtils.FindType("System.Net.Security.EncryptionPolicy"); @@ -133,21 +164,38 @@ private void Connect() RedisConfig.CertificateSelectionCallback, policyValue, }); +#endif } -#if NETSTANDARD1_3 - sslStream.AuthenticateAsClientAsync(Host).Wait(); +#if NETSTANDARD || NET472 + var task = sslStream.AuthenticateAsClientAsync(Host); + if (ConnectTimeout > 0) + { + task.Wait(ConnectTimeout); + } + else + { + task.Wait(); + } #else - sslStream.AuthenticateAsClient(Host); + if (SslProtocols != null) + { + sslStream.AuthenticateAsClient(Host, new X509CertificateCollection(), + SslProtocols ?? System.Security.Authentication.SslProtocols.None, checkCertificateRevocation: true); + } + else + { + sslStream.AuthenticateAsClient(Host); + } #endif if (!sslStream.IsEncrypted) - throw new Exception("Could not establish an encrypted connection to " + Host); + throw new Exception($"Could not establish an encrypted connection to '{Host}:{Port}'"); networkStream = sslStream; } - Bstream = new BufferedStream(networkStream, 16 * 1024); + bufferedReader = new BufferedReader(networkStream, 16 * 1024); if (!string.IsNullOrEmpty(Password)) SendUnmanagedExpectSuccess(Commands.Auth, Password.ToUtf8Bytes()); @@ -185,8 +233,7 @@ private void Connect() return; } - var ipEndpoint = socket.LocalEndPoint as IPEndPoint; - clientPort = ipEndpoint != null ? ipEndpoint.Port : -1; + clientPort = socket.LocalEndPoint is IPEndPoint ipEndpoint ? 
ipEndpoint.Port : -1; lastCommand = null; lastSocketException = null; LastConnectedAtTimestamp = Stopwatch.GetTimestamp(); @@ -194,15 +241,19 @@ private void Connect() OnConnected(); if (ConnectionFilter != null) - { ConnectionFilter(this); - } } catch (SocketException) { - log.Error(ErrorConnect.Fmt(Host, Port)); + logError(ErrorConnect.Fmt(Host, Port)); throw; } + finally + { +#if DEBUG + DebugAllowSync = oldDebugAllowSync; +#endif + } } public static string ErrorConnect = "Could not connect to redis Instance at {0}:{1}"; @@ -213,10 +264,13 @@ public virtual void OnConnected() protected string ReadLine() { + AssertNotDisposed(); + AssertNotAsyncOnly(); + var sb = StringBuilderCache.Allocate(); int c; - while ((c = Bstream.ReadByte()) != -1) + while ((c = bufferedReader.ReadByte()) != -1) { if (c == '\r') continue; @@ -227,15 +281,14 @@ protected string ReadLine() return StringBuilderCache.ReturnAndFree(sb); } - public bool HasConnected - { - get { return socket != null; } - } + public bool HasConnected => socket != null; public bool IsSocketConnected() { + if (socket == null) + return false; var part1 = socket.Poll(1000, SelectMode.SelectRead); - var part2 = (socket.Available == 0); + var part2 = socket.Available == 0; return !(part1 & part2); } @@ -249,23 +302,23 @@ internal bool AssertConnectedSocket() } catch (SocketException ex) { - log.Error(ErrorConnect.Fmt(Host, Port)); + logError(ErrorConnect.Fmt(Host, Port)); - if (socket != null) - socket.Close(); + socket?.Close(); socket = null; DeactivatedAt = DateTime.UtcNow; var message = Host + ":" + Port; var throwEx = new RedisException(message, ex); - log.Error(throwEx.Message, ex); + logError(throwEx.Message, ex); throw throwEx; } } - private void TryConnectIfNeeded() + private bool TryConnectIfNeeded() { + bool didConnect = false; if (LastConnectedAtTimestamp > 0) { var now = Stopwatch.GetTimestamp(); @@ -274,6 +327,7 @@ private void TryConnectIfNeeded() if (socket == null || (elapsedSecs > IdleTimeOutSecs && !socket.IsConnected())) { Reconnect(); + didConnect = true; } LastConnectedAtTimestamp = now; } @@ -281,17 +335,16 @@ private void TryConnectIfNeeded() if (socket == null) { Connect(); + didConnect = true; } + + return didConnect; } private bool Reconnect() { - var previousDb = db; - SafeConnectionClose(); - Connect(); //sets db to 0 - - if (previousDb != RedisConfig.DefaultDb) this.Db = previousDb; + Connect(); //sets db return socket != null; } @@ -300,18 +353,18 @@ private RedisResponseException CreateResponseError(string error) { DeactivatedAt = DateTime.UtcNow; - if (!RedisConfig.DisableVerboseLogging) + if (RedisConfig.EnableVerboseLogging) { var safeLastCommand = string.IsNullOrEmpty(Password) ? lastCommand : (lastCommand ?? "").Replace(Password, ""); if (!string.IsNullOrEmpty(safeLastCommand)) - error = string.Format("{0}, LastCommand:'{1}', srcPort:{2}", error, safeLastCommand, clientPort); + error = $"{error}, LastCommand:'{safeLastCommand}', srcPort:{clientPort}"; } var throwEx = new RedisResponseException(error); - log.Error(error); + logError(error); return throwEx; } @@ -325,22 +378,19 @@ private RedisRetryableException CreateRetryableResponseError(string error) { string safeLastCommand = string.IsNullOrEmpty(Password) ? lastCommand : (lastCommand ?? 
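IsSocketConnected() relies on the classic poll-plus-available liveness test: a socket that selects as readable but has zero bytes pending has been closed by the peer. The check in isolation:

    using System.Net.Sockets;

    static class SocketLivenessSketch
    {
        public static bool IsSocketConnected(Socket socket)
        {
            if (socket == null)
                return false;

            var readable = socket.Poll(1000, SelectMode.SelectRead);  // 1000 microseconds
            var noPendingData = socket.Available == 0;

            // readable + nothing to read => the remote end has closed the connection
            return !(readable && noPendingData);
        }
    }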
"").Replace(Password, ""); - var throwEx = new RedisRetryableException(string.Format("[{0}] {1}, sPort: {2}, LastCommand: {3}", - DateTime.UtcNow.ToString("HH:mm:ss.fff"), - error, clientPort, safeLastCommand)); - log.Error(throwEx.Message); + var throwEx = new RedisRetryableException( + $"[{DateTime.UtcNow:HH:mm:ss.fff}] {error}, sPort: {clientPort}, LastCommand: {safeLastCommand}"); + logError(throwEx.Message); throw throwEx; } private RedisException CreateConnectionError(Exception originalEx) { DeactivatedAt = DateTime.UtcNow; - var throwEx = new RedisException(string.Format("[{0}] Unable to Connect: sPort: {1}{2}", - DateTime.UtcNow.ToString("HH:mm:ss.fff"), - clientPort, - originalEx != null ? ", Error: " + originalEx.Message + "\n" + originalEx.StackTrace : ""), + var throwEx = new RedisException( + $"[{DateTime.UtcNow:HH:mm:ss.fff}] Unable to Connect: sPort: {clientPort}{(originalEx != null ? ", Error: " + originalEx.Message + "\n" + originalEx.StackTrace : "")}", originalEx ?? lastSocketException); - log.Error(throwEx.Message); + logError(throwEx.Message); throw throwEx; } @@ -362,7 +412,7 @@ private static byte[] GetCmdBytes(char cmdPrefix, int noOfLines) } /// - /// Command to set multuple binary safe arguments + /// Command to set multiple binary safe arguments /// /// /// @@ -375,7 +425,7 @@ protected void WriteCommandToSendBuffer(params byte[][] cmdWithBinaryArgs) LicenseUtils.AssertValidUsage(LicenseFeature.Redis, QuotaType.RequestsPerHour, __requestsPerHour); } - if (log.IsDebugEnabled && !RedisConfig.DisableVerboseLogging) + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) CmdLog(cmdWithBinaryArgs); //Total command lines count @@ -394,8 +444,11 @@ protected void SendUnmanagedExpectSuccess(params byte[][] cmdWithBinaryArgs) { bytes = bytes.Combine(GetCmdBytes('$', safeBinaryValue.Length), safeBinaryValue, endData); } + + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + logDebug("stream.Write: " + Encoding.UTF8.GetString(bytes, 0, Math.Min(bytes.Length, 50)).Replace("\r\n"," ").SafeSubstring(0,50)); - Bstream.Write(bytes, 0, bytes.Length); + SendDirectToSocket(new ArraySegment(bytes, 0, bytes.Length)); ExpectSuccess(); } @@ -412,7 +465,9 @@ public void WriteAllToSendBuffer(params byte[][] cmdWithBinaryArgs) } } - readonly IList> cmdBuffer = new List>(); + // trated as List rather than IList to avoid allocs during foreach + readonly List> cmdBuffer = new List>(); + byte[] currentBuffer = BufferPool.GetBuffer(); int currentBufferIndex; @@ -473,27 +528,69 @@ internal void FlushSendBuffer() if (!Env.IsMono && sslStream == null) { + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + { + var sb = StringBuilderCache.Allocate(); + foreach (var cmd in cmdBuffer) + { + if (sb.Length > 50) + break; + + sb.Append(Encoding.UTF8.GetString(cmd.Array, cmd.Offset, cmd.Count)); + } + logDebug("socket.Send: " + StringBuilderCache.ReturnAndFree(sb.Replace("\r\n", " ")).SafeSubstring(0,50)); + } + socket.Send(cmdBuffer); //Optimized for Windows } else { - //Sendling IList Throws 'Message to Large' SocketException in Mono + //Sending IList Throws 'Message to Large' SocketException in Mono foreach (var segment in cmdBuffer) { - var buffer = segment.Array; - if (sslStream == null) - { - socket.Send(buffer, segment.Offset, segment.Count, SocketFlags.None); - } - else - { - sslStream.Write(buffer, segment.Offset, segment.Count); - } + SendDirectToSocket(segment); } } } } + private void SendDirectToSocket(ArraySegment segment) + { + if (sslStream == null) + { + 
socket.Send(segment.Array, segment.Offset, segment.Count, SocketFlags.None); + } + else + { + sslStream.Write(segment.Array, segment.Offset, segment.Count); + } + } + + /// + /// Called before returning pooled client/socket + /// + internal void Activate(bool newClient=false) + { + if (!newClient) + { + //Drain any existing buffers + ResetSendBuffer(); + bufferedReader?.Reset(); + if (socket?.Available > 0) + { + logDebug($"Draining existing socket of {socket.Available} bytes"); + var buff = new byte[socket.Available]; + socket.Receive(buff, SocketFlags.None); + } + } + Active = true; + } + + internal void Deactivate() + { + Active = false; + } + /// /// reset buffer index in send buffer /// @@ -508,17 +605,52 @@ public void ResetSendBuffer() } } - private int SafeReadByte() + [MethodImpl(MethodImplOptions.AggressiveInlining)] + void AssertNotDisposed() + { + if (bufferedReader == null) + throw new ObjectDisposedException($"Redis Client {ClientId} is Disposed"); + } + + private int SafeReadByte(string name) { - return Bstream.ReadByte(); + AssertNotDisposed(); + AssertNotAsyncOnly(); + + if (log.IsDebugEnabled && RedisConfig.EnableVerboseLogging) + logDebug(name + "()"); + + return bufferedReader.ReadByte(); } + internal TrackThread? TrackThread; + + partial void AssertNotAsyncOnly([CallerMemberName] string caller = default); +#if DEBUG + public bool DebugAllowSync { get; set; } = true; + partial void AssertNotAsyncOnly(string caller) + { + // for unit tests only; asserts that we're not meant to be in an async context + if (!DebugAllowSync) + throw new InvalidOperationException("Unexpected synchronous operation detected from '" + caller + "'"); + } +#endif + + protected T SendReceive(byte[][] cmdWithBinaryArgs, Func fn, Action> completePipelineFn = null, bool sendWithoutRead = false) { + if (Pipeline is null) AssertNotAsyncOnly(); + if (TrackThread != null) + { + if (TrackThread.Value.ThreadId != Thread.CurrentThread.ManagedThreadId) + throw new InvalidAccessException(TrackThread.Value.ThreadId, TrackThread.Value.StackTrace); + } + var i = 0; + var didWriteToBuffer = false; Exception originalEx = null; var firstAttempt = DateTime.UtcNow; @@ -527,13 +659,17 @@ protected T SendReceive(byte[][] cmdWithBinaryArgs, { try { - TryConnectIfNeeded(); + if (TryConnectIfNeeded()) + didWriteToBuffer = false; if (socket == null) throw new RedisRetryableException("Socket is not connected"); - if (i == 0) //only write to buffer once + if (!didWriteToBuffer) //only write to buffer once + { WriteCommandToSendBuffer(cmdWithBinaryArgs); + didWriteToBuffer = true; + } if (Pipeline == null) //pipeline will handle flush if in pipeline { @@ -564,6 +700,9 @@ protected T SendReceive(byte[][] cmdWithBinaryArgs, } catch (Exception outerEx) { + if (log.IsDebugEnabled) + logDebug("SendReceive Exception: " + outerEx.Message); + var retryableEx = outerEx as RedisRetryableException; if (retryableEx == null && outerEx is RedisException || outerEx is LicenseException) @@ -599,7 +738,7 @@ private RedisException CreateRetryTimeoutException(TimeSpan retryTimeout, Except { DeactivatedAt = DateTime.UtcNow; var message = "Exceeded timeout of {0}".Fmt(retryTimeout); - log.Error(message); + logError(message); return new RedisException(message, originalEx); } @@ -612,11 +751,10 @@ private Exception GetRetryableException(Exception outerEx) if (socketEx == null) return null; - log.Error("SocketException in SendReceive, retrying...", socketEx); + logError("SocketException in SendReceive, retrying...", socketEx); lastSocketException = 
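SendReceive() retries retryable failures inside a retry window rather than failing on the first socket error, and only rewrites the command to the send buffer when a reconnect occurred. The overall shape, reduced to a generic helper (names and delays are illustrative):

    using System;
    using System.Net.Sockets;
    using System.Threading;

    static class SendReceiveRetrySketch
    {
        public static T Execute<T>(Func<T> attempt, TimeSpan retryTimeout)
        {
            var firstAttempt = DateTime.UtcNow;
            Exception firstEx = null;
            var i = 0;

            while (true)
            {
                try
                {
                    return attempt();
                }
                catch (SocketException ex)      // only socket-level errors are retryable
                {
                    firstEx ??= ex;
                    if (DateTime.UtcNow - firstAttempt >= retryTimeout)
                        throw new TimeoutException($"Exceeded timeout of {retryTimeout}", firstEx);

                    Thread.Sleep(Math.Min(100 * ++i, 1000));   // simple pause between attempts
                }
            }
        }
    }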
socketEx; - if (socket != null) - socket.Close(); + socket?.Close(); socket = null; return socketEx; @@ -729,10 +867,10 @@ protected string SendExpectString(params byte[][] cmdWithBinaryArgs) protected void Log(string fmt, params object[] args) { - if (RedisConfig.DisableVerboseLogging) + if (!RedisConfig.EnableVerboseLogging) return; - log.DebugFormat("{0}", string.Format(fmt, args).Trim()); + log.DebugFormat(LogPrefix + "{0}", string.Format(fmt, args).Trim()); } protected void CmdLog(byte[][] args) @@ -757,7 +895,7 @@ protected void CmdLog(byte[][] args) this.lastCommand = this.lastCommand.Substring(0, 100) + "..."; } - log.Debug("S: " + this.lastCommand); + logDebug("S: " + this.lastCommand); } //Turn Action into Func Hack @@ -769,7 +907,7 @@ protected long ExpectSuccessFn() protected void ExpectSuccess() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ExpectSuccess)); if (c == -1) throw CreateNoMoreDataError(); @@ -784,7 +922,7 @@ protected void ExpectSuccess() private void ExpectWord(string word) { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ExpectWord)); if (c == -1) throw CreateNoMoreDataError(); @@ -797,12 +935,12 @@ private void ExpectWord(string word) throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); if (s != word) - throw CreateResponseError(string.Format("Expected '{0}' got '{1}'", word, s)); + throw CreateResponseError($"Expected '{word}' got '{s}'"); } private string ExpectCode() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ExpectCode)); if (c == -1) throw CreateNoMoreDataError(); @@ -829,12 +967,15 @@ internal void ExpectQueued() public long ReadLong() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ReadLong)); if (c == -1) throw CreateNoMoreDataError(); - var s = ReadLine(); + return ParseLong(c, ReadLine()); + } + private long ParseLong(int c, string s) + { if (log.IsDebugEnabled) Log("R: {0}", s); @@ -847,7 +988,7 @@ public long ReadLong() if (long.TryParse(s, out i)) return i; } - throw CreateResponseError("Unknown reply on integer response: " + c + s); + throw CreateResponseError("Unknown reply on integer response: " + ((char)c) + s); // c here is the protocol prefix } public double ReadDouble() @@ -859,10 +1000,7 @@ public double ReadDouble() public static double ParseDouble(byte[] doubleBytes) { var doubleString = Encoding.UTF8.GetString(doubleBytes); - - double d; - double.TryParse(doubleString, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out d); - + double.TryParse(doubleString, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var d); return d; } @@ -887,16 +1025,15 @@ private byte[] ParseSingleLine(string r) { if (r == "$-1") return null; - int count; - if (int.TryParse(r.Substring(1), out count)) + if (int.TryParse(r.Substring(1), out var count)) { var retbuf = new byte[count]; var offset = 0; while (count > 0) { - var readCount = Bstream.Read(retbuf, offset, count); + var readCount = bufferedReader.Read(retbuf, offset, count); if (readCount <= 0) throw CreateResponseError("Unexpected end of Stream"); @@ -904,7 +1041,7 @@ private byte[] ParseSingleLine(string r) count -= readCount; } - if (Bstream.ReadByte() != '\r' || Bstream.ReadByte() != '\n') + if (bufferedReader.ReadByte() != '\r' || bufferedReader.ReadByte() != '\n') throw CreateResponseError("Invalid termination"); return retbuf; @@ -922,7 +1059,7 @@ private byte[] ParseSingleLine(string r) private byte[][] ReadMultiData() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ReadMultiData)); if (c == -1) 
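ReadLong()/ParseLong() interpret a single reply line according to its RESP prefix: ':' (or a '$' length header) carries an integer, '-' carries a server error. In isolation:

    using System;

    static class RespIntegerSketch
    {
        // prefix is the first byte of the reply, line is the rest of the CRLF-terminated line,
        // e.g. (':', "1000") => 1000
        public static long ParseIntegerReply(char prefix, string line)
        {
            if (prefix == '-')
                throw new Exception(line.StartsWith("ERR") ? line.Substring(4) : line);

            if (prefix == ':' || prefix == '$')
            {
                if (long.TryParse(line, out var value))
                    return value;
            }

            throw new Exception("Unknown reply on integer response: " + prefix + line);
        }
    }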
throw CreateNoMoreDataError(); @@ -942,8 +1079,7 @@ private byte[][] ReadMultiData() throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); case '*': - int count; - if (int.TryParse(s, out count)) + if (int.TryParse(s, out var count)) { if (count == -1) { @@ -961,7 +1097,7 @@ private byte[][] ReadMultiData() break; } - throw CreateResponseError("Unknown reply on multi-request: " + c + s); + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix } private object[] ReadDeeplyNestedMultiData() @@ -972,7 +1108,7 @@ private object[] ReadDeeplyNestedMultiData() private object ReadDeeplyNestedMultiDataItem() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ReadDeeplyNestedMultiDataItem)); if (c == -1) throw CreateNoMoreDataError(); @@ -989,8 +1125,7 @@ private object ReadDeeplyNestedMultiDataItem() throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); case '*': - int count; - if (int.TryParse(s, out count)) + if (int.TryParse(s, out var count)) { var array = new object[count]; for (int i = 0; i < count; i++) @@ -1006,12 +1141,12 @@ private object ReadDeeplyNestedMultiDataItem() return s; } - throw CreateResponseError("Unknown reply on multi-request: " + c + s); + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix } internal RedisData ReadComplexResponse() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ReadComplexResponse)); if (c == -1) throw CreateNoMoreDataError(); @@ -1031,8 +1166,7 @@ internal RedisData ReadComplexResponse() throw CreateResponseError(s.StartsWith("ERR") ? s.Substring(4) : s); case '*': - int count; - if (int.TryParse(s, out count)) + if (int.TryParse(s, out var count)) { var ret = new RedisData { Children = new List() }; for (var i = 0; i < count; i++) @@ -1048,12 +1182,12 @@ internal RedisData ReadComplexResponse() return new RedisData { Data = s.ToUtf8Bytes() }; } - throw CreateResponseError("Unknown reply on multi-request: " + c + s); + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix } internal int ReadMultiDataResultCount() { - int c = SafeReadByte(); + int c = SafeReadByte(nameof(ReadMultiDataResultCount)); if (c == -1) throw CreateNoMoreDataError(); @@ -1064,21 +1198,20 @@ internal int ReadMultiDataResultCount() throw CreateResponseError(s.StartsWith("ERR") ? 
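The multi-data readers above all follow the same recursive scheme: a '*' reply announces an element count, and each element is itself a status, error, integer, bulk string or nested array. A compact stream-based sketch of that scheme (not the client's buffered reader):

    using System;
    using System.Collections.Generic;
    using System.IO;
    using System.Text;

    static class RespArraySketch
    {
        // Returns strings and List<object> values for readability.
        public static object ReadReply(Stream stream)
        {
            var prefix = stream.ReadByte();
            var line = ReadLine(stream);
            switch (prefix)
            {
                case '+':
                case ':':
                    return line;
                case '-':
                    throw new Exception(line);
                case '$':
                    var len = int.Parse(line);
                    if (len == -1) return null;                 // null bulk reply ($-1)
                    var buf = new byte[len];
                    var read = 0;
                    while (read < len)
                    {
                        var n = stream.Read(buf, read, len - read);
                        if (n <= 0) throw new EndOfStreamException();
                        read += n;
                    }
                    ReadLine(stream);                           // consume the trailing \r\n
                    return Encoding.UTF8.GetString(buf);
                case '*':
                    var count = int.Parse(line);
                    var items = new List<object>();
                    for (var i = 0; i < count; i++)
                        items.Add(ReadReply(stream));           // nested replies recurse here
                    return items;
                default:
                    throw new Exception("Unknown reply prefix: " + (char)prefix + line);
            }
        }

        static string ReadLine(Stream stream)
        {
            var sb = new StringBuilder();
            int c;
            while ((c = stream.ReadByte()) != -1 && c != '\n')
                if (c != '\r') sb.Append((char)c);
            return sb.ToString();
        }
    }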
s.Substring(4) : s); if (c == '*') { - int count; - if (int.TryParse(s, out count)) + if (int.TryParse(s, out var count)) { return count; } } - throw CreateResponseError("Unknown reply on multi-request: " + c + s); + throw CreateResponseError("Unknown reply on multi-request: " + ((char)c) + s); // c here is the protocol prefix } private static void AssertListIdAndValue(string listId, byte[] value) { if (listId == null) - throw new ArgumentNullException("listId"); + throw new ArgumentNullException(nameof(listId)); if (value == null) - throw new ArgumentNullException("value"); + throw new ArgumentNullException(nameof(value)); } private static byte[][] MergeCommandWithKeysAndValues(byte[] cmd, byte[][] keys, byte[][] values) @@ -1097,13 +1230,13 @@ private static byte[][] MergeCommandWithKeysAndValues(byte[][] firstParams, byte[][] keys, byte[][] values) { if (keys == null || keys.Length == 0) - throw new ArgumentNullException("keys"); + throw new ArgumentNullException(nameof(keys)); if (values == null || values.Length == 0) - throw new ArgumentNullException("values"); + throw new ArgumentNullException(nameof(values)); if (keys.Length != values.Length) throw new ArgumentException("The number of values must be equal to the number of keys"); - var keyValueStartIndex = (firstParams != null) ? firstParams.Length : 0; + var keyValueStartIndex = firstParams?.Length ?? 0; var keysAndValuesLength = keys.Length * 2 + keyValueStartIndex; var keysAndValues = new byte[keysAndValuesLength][]; @@ -1183,7 +1316,7 @@ protected byte[][] MergeAndConvertToBytes(string[] keys, string[] args) public long EvalInt(string luaBody, int numberKeysInArgs, params byte[][] keys) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectLong(cmdArgs); @@ -1192,7 +1325,7 @@ public long EvalInt(string luaBody, int numberKeysInArgs, params byte[][] keys) public long EvalShaInt(string sha1, int numberKeysInArgs, params byte[][] keys) { if (sha1 == null) - throw new ArgumentNullException("sha1"); + throw new ArgumentNullException(nameof(sha1)); var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectLong(cmdArgs); @@ -1201,7 +1334,7 @@ public long EvalShaInt(string sha1, int numberKeysInArgs, params byte[][] keys) public string EvalStr(string luaBody, int numberKeysInArgs, params byte[][] keys) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectData(cmdArgs).FromUtf8Bytes(); @@ -1210,7 +1343,7 @@ public string EvalStr(string luaBody, int numberKeysInArgs, params byte[][] keys public string EvalShaStr(string sha1, int numberKeysInArgs, params byte[][] keys) { if (sha1 == null) - throw new ArgumentNullException("sha1"); + throw new ArgumentNullException(nameof(sha1)); var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectData(cmdArgs).FromUtf8Bytes(); @@ -1219,7 +1352,7 @@ public string EvalShaStr(string sha1, int numberKeysInArgs, params byte[][] keys public byte[][] Eval(string luaBody, int numberKeysInArgs, params byte[][] keys) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new 
ArgumentNullException(nameof(luaBody)); var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectMultiData(cmdArgs); @@ -1228,7 +1361,7 @@ public byte[][] Eval(string luaBody, int numberKeysInArgs, params byte[][] keys) public byte[][] EvalSha(string sha1, int numberKeysInArgs, params byte[][] keys) { if (sha1 == null) - throw new ArgumentNullException("sha1"); + throw new ArgumentNullException(nameof(sha1)); var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return SendExpectMultiData(cmdArgs); @@ -1237,7 +1370,7 @@ public byte[][] EvalSha(string sha1, int numberKeysInArgs, params byte[][] keys) public RedisData EvalCommand(string luaBody, int numberKeysInArgs, params byte[][] keys) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); var cmdArgs = MergeCommandWithArgs(Commands.Eval, luaBody.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return RawCommand(cmdArgs); @@ -1246,7 +1379,7 @@ public RedisData EvalCommand(string luaBody, int numberKeysInArgs, params byte[] public RedisData EvalShaCommand(string sha1, int numberKeysInArgs, params byte[][] keys) { if (sha1 == null) - throw new ArgumentNullException("sha1"); + throw new ArgumentNullException(nameof(sha1)); var cmdArgs = MergeCommandWithArgs(Commands.EvalSha, sha1.ToUtf8Bytes(), keys.PrependInt(numberKeysInArgs)); return RawCommand(cmdArgs); @@ -1255,7 +1388,7 @@ public RedisData EvalShaCommand(string sha1, int numberKeysInArgs, params byte[] public string CalculateSha1(string luaBody) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); byte[] buffer = Encoding.UTF8.GetBytes(luaBody); return BitConverter.ToString(buffer.ToSha1Hash()).Replace("-", ""); @@ -1264,7 +1397,7 @@ public string CalculateSha1(string luaBody) public byte[] ScriptLoad(string luaBody) { if (luaBody == null) - throw new ArgumentNullException("luaBody"); + throw new ArgumentNullException(nameof(luaBody)); var cmdArgs = MergeCommandWithArgs(Commands.Script, Commands.Load, luaBody.ToUtf8Bytes()); return SendExpectData(cmdArgs); diff --git a/src/ServiceStack.Redis/RedisPipeline.cs b/src/ServiceStack.Redis/RedisPipeline.cs deleted file mode 100644 index 0a942352..00000000 --- a/src/ServiceStack.Redis/RedisPipeline.cs +++ /dev/null @@ -1,47 +0,0 @@ -#if !NETSTANDARD1_3 -using System; -using System.Collections.Generic; -using System.Linq; - -namespace ServiceStack.Redis -{ - public class RedisPipelineCommand - { - private readonly RedisNativeClient client; - private int cmdCount; - - public RedisPipelineCommand(RedisNativeClient client) - { - this.client = client; - } - - public void WriteCommand(params byte[][] cmdWithBinaryArgs) - { - client.WriteAllToSendBuffer(cmdWithBinaryArgs); - cmdCount++; - } - - public List ReadAllAsInts() - { - var results = new List(); - while (cmdCount-- > 0) - { - results.Add(client.ReadInt()); - } - - return results; - } - - public bool ReadAllAsIntsHaveSuccess() - { - var allResults = ReadAllAsInts(); - return allResults.All(x => x == RedisNativeClient.Success); - } - - public void Flush() - { - client.FlushSendBuffer(); - } - } -} -#endif diff --git a/src/ServiceStack.Redis/RedisPubSubServer.cs b/src/ServiceStack.Redis/RedisPubSubServer.cs index d3604b39..a1d4fd17 100644 --- a/src/ServiceStack.Redis/RedisPubSubServer.cs +++ b/src/ServiceStack.Redis/RedisPubSubServer.cs @@ 
-1,8 +1,6 @@ using System; using System.Diagnostics; -using System.Text; using System.Threading; -using System.Threading.Tasks; using ServiceStack.Logging; using ServiceStack.Text; @@ -25,9 +23,16 @@ public class RedisPubSubServer : IRedisPubSubServer public Action OnHeartbeatReceived { get; set; } public Action OnStop { get; set; } public Action OnDispose { get; set; } + + /// + /// Callback fired on each message received, handle with (channel, msg) => ... + /// public Action OnMessage { get; set; } + public Action OnMessageBytes { get; set; } + public Action OnControlCommand { get; set; } public Action OnUnSubscribe { get; set; } + public Action OnEvent { get; set; } public Action OnError { get; set; } public Action OnFailover { get; set; } public bool IsSentinelSubscription { get; set; } @@ -41,9 +46,7 @@ public class RedisPubSubServer : IRedisPubSubServer private int noOfContinuousErrors = 0; private string lastExMsg = null; private int status; -#if !NETSTANDARD1_3 private Thread bgThread; //Subscription controller thread -#endif private long bgThreadCount = 0; private const int NO = 0; @@ -52,19 +55,13 @@ public class RedisPubSubServer : IRedisPubSubServer private int autoRestart = YES; public bool AutoRestart { - get { return Interlocked.CompareExchange(ref autoRestart, 0, 0) == YES; } - set { Interlocked.CompareExchange(ref autoRestart, value ? YES : NO, autoRestart); } + get => Interlocked.CompareExchange(ref autoRestart, 0, 0) == YES; + set => Interlocked.CompareExchange(ref autoRestart, value ? YES : NO, autoRestart); } - public DateTime CurrentServerTime - { - get { return new DateTime(serverTimeAtStart.Ticks + startedAt.ElapsedTicks, DateTimeKind.Utc); } - } + public DateTime CurrentServerTime => new DateTime(serverTimeAtStart.Ticks + startedAt.Elapsed.Ticks, DateTimeKind.Utc); - public long BgThreadCount - { - get { return Interlocked.CompareExchange(ref bgThreadCount, 0, 0); } - } + public long BgThreadCount => Interlocked.CompareExchange(ref bgThreadCount, 0, 0); public const string AllChannelsWildCard = "*"; public IRedisClientsManager ClientsManager { get; set; } @@ -76,12 +73,10 @@ public RedisPubSubServer(IRedisClientsManager clientsManager, params string[] ch { this.ClientsManager = clientsManager; this.Channels = channels; + startedAt = Stopwatch.StartNew(); var failoverHost = clientsManager as IRedisFailover; - if (failoverHost != null) - { - failoverHost.OnFailover.Add(HandleFailover); - } + failoverHost?.OnFailover.Add(HandleFailover); } public IRedisPubSubServer Start() @@ -91,8 +86,7 @@ public IRedisPubSubServer Start() if (Interlocked.CompareExchange(ref status, 0, 0) == Status.Started) { //Start any stopped worker threads - if (OnStart != null) - OnStart(); + OnStart?.Invoke(); return this; } @@ -102,18 +96,31 @@ public IRedisPubSubServer Start() //Only 1 thread allowed past if (Interlocked.CompareExchange(ref status, Status.Starting, Status.Stopped) == Status.Stopped) //Should only be 1 thread past this point { - try + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} Stopped] Start()> Stopped -> Starting"); + + var initErrors = 0; + bool hasInit = false; + while (!hasInit) { - Init(); + try + { + Init(); + hasInit = true; + } + catch (Exception ex) + { + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] Start().Init()> Exception: {ex.Message}"); + OnError?.Invoke(ex); + SleepBackOffMultiplier(initErrors++); + } + } + try + { SleepBackOffMultiplier(Interlocked.CompareExchange(ref noOfContinuousErrors, 0, 0)); - if (OnStart != null) - OnStart(); + 
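For context on how the callbacks declared above are typically wired up, a small usage example; the host, channel names and handlers are placeholders:

    using System;
    using ServiceStack.Redis;

    class PubSubServerExample
    {
        static void Main()
        {
            var clientsManager = new PooledRedisClientManager("localhost:6379");

            var pubSub = new RedisPubSubServer(clientsManager, "channel-1", "channel-2")
            {
                OnMessage = (channel, msg) => Console.WriteLine($"{channel}: {msg}"),
                OnEvent = evt => Console.WriteLine(evt),          // internal state transitions
                OnError = ex => Console.WriteLine("ERROR: " + ex.Message),
            };

            pubSub.Start();   // subscribes on a background thread and blocks it on the subscription
            Console.ReadLine();
            pubSub.Dispose();
        }
    }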
OnStart?.Invoke(); -#if NETSTANDARD1_3 - RunLoop(); -#else //Don't kill us if we're the thread that's retrying to Start() after a failure. if (bgThread != Thread.CurrentThread) { @@ -134,12 +141,10 @@ public IRedisPubSubServer Start() Log.Debug("Retrying RunLoop() on Thread: " + bgThread.Name); RunLoop(); } -#endif } catch (Exception ex) { - if (this.OnError != null) - this.OnError(ex); + OnError?.Invoke(ex); } } @@ -148,20 +153,12 @@ public IRedisPubSubServer Start() private void Init() { - try + using (var redis = ClientsManager.GetReadOnlyClient()) { - using (var redis = ClientsManager.GetReadOnlyClient()) - { - startedAt = Stopwatch.StartNew(); - serverTimeAtStart = IsSentinelSubscription - ? DateTime.UtcNow - : redis.GetServerTime(); - } - } - catch (Exception ex) - { - if (OnError != null) - OnError(ex); + startedAt = Stopwatch.StartNew(); + serverTimeAtStart = IsSentinelSubscription + ? DateTime.UtcNow + : redis.GetServerTime(); } DisposeHeartbeatTimer(); @@ -169,13 +166,12 @@ private void Init() if (HeartbeatInterval != null) { heartbeatTimer = new Timer(SendHeartbeat, null, - TimeSpan.FromMilliseconds(0), HeartbeatInterval.Value); + TimeSpan.FromMilliseconds(0), HeartbeatInterval.GetValueOrDefault()); } Interlocked.CompareExchange(ref lastHeartbeatTicks, DateTime.UtcNow.Ticks, lastHeartbeatTicks); - if (OnInit != null) - OnInit(); + OnInit?.Invoke(); } void SendHeartbeat(object state) @@ -184,17 +180,18 @@ void SendHeartbeat(object state) if (currentStatus != Status.Started) return; - if (DateTime.UtcNow - new DateTime(lastHeartbeatTicks) < HeartbeatInterval.Value) + if (DateTime.UtcNow - new DateTime(lastHeartbeatTicks) < HeartbeatInterval.GetValueOrDefault()) return; - if (OnHeartbeatSent != null) - OnHeartbeatSent(); + OnHeartbeatSent?.Invoke(); NotifyAllSubscribers(ControlCommand.Pulse); if (DateTime.UtcNow - new DateTime(lastHeartbeatTicks) > HeartbeatTimeout) { currentStatus = Interlocked.CompareExchange(ref status, 0, 0); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {Status.GetStatus(currentStatus)}] SendHeartbeat()> Exceeded HeartbeatTimeout"); if (currentStatus == Status.Started) { Restart(); @@ -206,8 +203,7 @@ void Pulse() { Interlocked.CompareExchange(ref lastHeartbeatTicks, DateTime.UtcNow.Ticks, lastHeartbeatTicks); - if (OnHeartbeatReceived != null) - OnHeartbeatReceived(); + OnHeartbeatReceived?.Invoke(); } private void DisposeHeartbeatTimer() @@ -217,11 +213,14 @@ private void DisposeHeartbeatTimer() try { + if (Log.IsDebugEnabled) + Log.Debug("RedisPubServer.DisposeHeartbeatTimer()"); + heartbeatTimer.Dispose(); } catch (Exception ex) { - if (this.OnError != null) this.OnError(ex); + OnError?.Invoke(ex); } heartbeatTimer = null; } @@ -232,111 +231,139 @@ private void RunLoop() if (Interlocked.CompareExchange(ref status, Status.Started, Status.Starting) != Status.Starting) return; Interlocked.Increment(ref timesStarted); + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} Started] RunLoop().Stop> Starting -> Started, timesStarted: {timesStarted}"); + try { //RESET while (Interlocked.CompareExchange(ref status, 0, 0) == Status.Started) { - using (var redis = ClientsManager.GetReadOnlyClient()) + using var redis = ClientsManager.GetReadOnlyClient(); + masterClient = redis; + + //Record that we had a good run... 
+ Interlocked.CompareExchange(ref noOfContinuousErrors, 0, noOfContinuousErrors); + + using var subscription = redis.CreateSubscription(); + subscription.OnUnSubscribe = HandleUnSubscribe; + + if (OnMessageBytes != null) { - masterClient = redis; + bool IsCtrlMessage(byte[] msg) + { + if (msg.Length < 4) + return false; + return msg[0] == 'C' && msg[1] == 'T' && msg[0] == 'R' && msg[0] == 'L'; + } + + ((RedisSubscription)subscription).OnMessageBytes = (channel, msg) => { + if (IsCtrlMessage(msg)) + return; - //Record that we had a good run... - Interlocked.CompareExchange(ref noOfContinuousErrors, 0, noOfContinuousErrors); + OnMessageBytes(channel, msg); + }; + } + + subscription.OnMessage = (channel, msg) => + { + if (string.IsNullOrEmpty(msg)) + return; - using (var subscription = redis.CreateSubscription()) + var ctrlMsg = msg.LeftPart(':'); + if (ctrlMsg == ControlCommand.Control) { - subscription.OnUnSubscribe = HandleUnSubscribe; + var op = Interlocked.CompareExchange(ref doOperation, Operation.NoOp, doOperation); + + var msgType = msg.IndexOf(':') >= 0 + ? msg.RightPart(':') + : null; - subscription.OnMessage = (channel, msg) => + OnControlCommand?.Invoke(msgType ?? Operation.GetName(op)); + + switch (op) { - if (string.IsNullOrEmpty(msg)) - return; + case Operation.Stop: + if (Log.IsDebugEnabled) + Log.Debug("Stop Command Issued"); - var ctrlMsg = msg.SplitOnFirst(':'); - if (ctrlMsg[0] == ControlCommand.Control) - { - var op = Interlocked.CompareExchange(ref doOperation, Operation.NoOp, doOperation); + var holdStatus = GetStatus(); - var msgType = ctrlMsg.Length > 1 - ? ctrlMsg[1] - : null; - - if (OnControlCommand != null) - OnControlCommand(msgType ?? Operation.GetName(op)); + Interlocked.CompareExchange(ref status, Status.Stopping, Status.Started); - switch (op) + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {holdStatus}] RunLoop().Stop> Started -> Stopping"); + try { - case Operation.Stop: - if (Log.IsDebugEnabled) - Log.Debug("Stop Command Issued"); - - Interlocked.CompareExchange(ref status, Status.Stopping, Status.Started); - try - { - if (Log.IsDebugEnabled) - Log.Debug("UnSubscribe From All Channels..."); - - subscription.UnSubscribeFromAllChannels(); //Un block thread. - } - finally - { - Interlocked.CompareExchange(ref status, Status.Stopped, Status.Stopping); - } - return; - - case Operation.Reset: - subscription.UnSubscribeFromAllChannels(); //Un block thread. - return; - } + if (Log.IsDebugEnabled) + Log.Debug("UnSubscribe From All Channels..."); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] RunLoop().Stop> subscription.UnSubscribeFromAllChannels()"); - switch (msgType) + // ReSharper disable once AccessToDisposedClosure + subscription.UnSubscribeFromAllChannels(); //Un block thread. 
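The byte-level filter above is presumably meant to skip payloads beginning with the ASCII bytes 'C','T','R','L' before they reach OnMessageBytes; as written it compares msg[0] three times. A check that tests all four leading bytes would look like this (sketch of the apparent intent):

    static class CtrlPrefixSketch
    {
        // True when the message payload starts with the ASCII bytes "CTRL",
        // i.e. an internal control command rather than an application message.
        static bool IsCtrlMessage(byte[] msg) =>
            msg != null
            && msg.Length >= 4
            && msg[0] == (byte)'C'
            && msg[1] == (byte)'T'
            && msg[2] == (byte)'R'
            && msg[3] == (byte)'L';
    }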
+ } + finally { - case ControlCommand.Pulse: - Pulse(); - break; + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] RunLoop().Stop> Stopping -> Stopped"); + Interlocked.CompareExchange(ref status, Status.Stopped, Status.Stopping); } - } - else - { - OnMessage(channel, msg); - } - }; - - //blocks thread - if (ChannelsMatching != null && ChannelsMatching.Length > 0) - subscription.SubscribeToChannelsMatching(ChannelsMatching); - else - subscription.SubscribeToChannels(Channels); - - masterClient = null; + return; + + case Operation.Reset: + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] RunLoop().Reset> subscription.UnSubscribeFromAllChannels()"); + + // ReSharper disable once AccessToDisposedClosure + subscription.UnSubscribeFromAllChannels(); //Un block thread. + return; + } + + switch (msgType) + { + case ControlCommand.Pulse: + Pulse(); + break; + } } - } + else + { + OnMessage(channel, msg); + } + }; + + //blocks thread + if (ChannelsMatching != null && ChannelsMatching.Length > 0) + subscription.SubscribeToChannelsMatching(ChannelsMatching); + else + subscription.SubscribeToChannels(Channels); + + masterClient = null; } - if (OnStop != null) - OnStop(); + OnStop?.Invoke(); } catch (Exception ex) { lastExMsg = ex.Message; Interlocked.Increment(ref noOfErrors); Interlocked.Increment(ref noOfContinuousErrors); + + var holdStatus = GetStatus(); if (Interlocked.CompareExchange(ref status, Status.Stopped, Status.Started) != Status.Started) Interlocked.CompareExchange(ref status, Status.Stopped, Status.Stopping); - if (OnStop != null) - OnStop(); + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {holdStatus}] RunLoop().Stop> Started|Stopping -> Stopped"); - if (this.OnError != null) - this.OnError(ex); + OnStop?.Invoke(); + + OnError?.Invoke(ex); } if (AutoRestart && Interlocked.CompareExchange(ref status, 0, 0) != Status.Disposed) { if (WaitBeforeNextRestart != null) TaskUtils.Sleep(WaitBeforeNextRestart.Value); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] RunLoop().AutoRestart> Start()"); Start(); } } @@ -355,10 +382,12 @@ private void Stop(bool shouldRestart) if (Interlocked.CompareExchange(ref status, Status.Stopping, Status.Started) == Status.Started) { + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] Stop()> Started -> Stopping"); + if (Log.IsDebugEnabled) Log.Debug("Stopping RedisPubSubServer..."); - //Unblock current bgthread by issuing StopCommand + //Unblock current bg thread by issuing StopCommand SendControlCommand(Operation.Stop); } } @@ -377,18 +406,16 @@ private void NotifyAllSubscribers(string commandType=null) try { - using (var redis = ClientsManager.GetClient()) + using var redis = ClientsManager.GetClient(); + foreach (var channel in Channels) { - foreach (var channel in Channels) - { - redis.PublishMessage(channel, msg); - } + redis.PublishMessage(channel, msg); } } catch (Exception ex) { - if (this.OnError != null) this.OnError(ex); - Log.Warn("Could not send '{0}' message to bg thread: {1}".Fmt(msg, ex.Message)); + OnError?.Invoke(ex); + Log.WarnFormat("Could not send '{0}' message to bg thread: {1}", msg, ex.Message); } } @@ -396,19 +423,16 @@ private void HandleFailover(IRedisClientsManager clientsManager) { try { - if (OnFailover != null) - OnFailover(this); + OnFailover?.Invoke(this); if (masterClient != null) { //New thread-safe client with same connection info as connected master - using (var currentlySubscribedClient = ((RedisClient)masterClient).CloneClient()) + using var 
currentlySubscribedClient = ((RedisClient)masterClient).CloneClient(); + Interlocked.CompareExchange(ref doOperation, Operation.Reset, doOperation); + foreach (var channel in Channels) { - Interlocked.CompareExchange(ref doOperation, Operation.Reset, doOperation); - foreach (var channel in Channels) - { - currentlySubscribedClient.PublishMessage(channel, ControlCommand.Control); - } + currentlySubscribedClient.PublishMessage(channel, ControlCommand.Control); } } else @@ -418,7 +442,7 @@ private void HandleFailover(IRedisClientsManager clientsManager) } catch (Exception ex) { - if (this.OnError != null) this.OnError(ex); + OnError?.Invoke(ex); Log.Warn("Error trying to UnSubscribeFromChannels in OnFailover. Restarting...", ex); Restart(); } @@ -429,8 +453,7 @@ void HandleUnSubscribe(string channel) if (Log.IsDebugEnabled) Log.Debug("OnUnSubscribe: " + channel); - if (OnUnSubscribe != null) - OnUnSubscribe(channel); + OnUnSubscribe?.Invoke(channel); } public void Restart() @@ -438,7 +461,6 @@ public void Restart() Stop(shouldRestart:true); } -#if !NETSTANDARD1_3 private void KillBgThreadIfExists() { if (bgThread != null && bgThread.IsAlive) @@ -446,32 +468,35 @@ private void KillBgThreadIfExists() //give it a small chance to die gracefully if (!bgThread.Join(500)) { +#if !NETCORE //Ideally we shouldn't get here, but lets try our hardest to clean it up + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] KillBgThreadIfExists()> bgThread.Interrupt()"); Log.Warn("Interrupting previous Background Thread: " + bgThread.Name); bgThread.Interrupt(); if (!bgThread.Join(TimeSpan.FromSeconds(3))) { + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] KillBgThreadIfExists()> bgThread.Abort()"); Log.Warn(bgThread.Name + " just wont die, so we're now aborting it..."); bgThread.Abort(); } +#endif } bgThread = null; } } -#endif private void SleepBackOffMultiplier(int continuousErrorsCount) { if (continuousErrorsCount == 0) return; - const int MaxSleepMs = 60 * 1000; + const int maxSleepMs = 60 * 1000; //exponential/random retry back-off. 
var nextTry = Math.Min( rand.Next((int)Math.Pow(continuousErrorsCount, 3), (int)Math.Pow(continuousErrorsCount + 1, 3) + 1), - MaxSleepMs); + maxSleepMs); if (Log.IsDebugEnabled) - Log.Debug("Sleeping for {0}ms after {1} continuous errors".Fmt(nextTry, continuousErrorsCount)); + Log.DebugFormat("Sleeping for {0}ms after {1} continuous errors", nextTry, continuousErrorsCount); TaskUtils.Sleep(nextTry); } @@ -514,26 +539,22 @@ class Status //dep-free copy of WorkerStatus public const int Stopping = 1; public const int Starting = 2; public const int Started = 3; - } - public string GetStatus() - { - switch (Interlocked.CompareExchange(ref status, 0, 0)) + public static string GetStatus(int status) { - case Status.Disposed: - return "Disposed"; - case Status.Stopped: - return "Stopped"; - case Status.Stopping: - return "Stopping"; - case Status.Starting: - return "Starting"; - case Status.Started: - return "Started"; + return status switch { + Disposed => nameof(Disposed), + Stopped => nameof(Stopped), + Stopping => nameof(Stopping), + Starting => nameof(Starting), + Started => nameof(Started), + _ => throw new NotSupportedException("Unknown status: " + status) + }; } - return null; } + public string GetStatus() => Status.GetStatus(Interlocked.CompareExchange(ref status, 0, 0)); + public string GetStatsDescription() { var sb = StringBuilderCache.Allocate(); @@ -551,23 +572,30 @@ public virtual void Dispose() { if (Interlocked.CompareExchange(ref status, 0, 0) == Status.Disposed) return; + + if (Log.IsDebugEnabled) + Log.Debug("RedisPubServer.Dispose()..."); + + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {GetStatus()}] Dispose()>"); Stop(); + var holdStatus = GetStatus(); + if (Interlocked.CompareExchange(ref status, Status.Disposed, Status.Stopped) != Status.Stopped) Interlocked.CompareExchange(ref status, Status.Disposed, Status.Stopping); + OnEvent?.Invoke($"[{DateTime.UtcNow.TimeOfDay:g} {holdStatus}] Dispose()> -> Disposed"); + try { - if (OnDispose != null) - OnDispose(); + OnDispose?.Invoke(); } catch (Exception ex) { Log.Error("Error OnDispose(): ", ex); } -#if !NETSTANDARD1_3 try { Thread.Sleep(100); //give it a small chance to die gracefully @@ -575,11 +603,10 @@ public virtual void Dispose() } catch (Exception ex) { - if (this.OnError != null) this.OnError(ex); + OnError?.Invoke(ex); } -#endif DisposeHeartbeatTimer(); } } -} \ No newline at end of file +} diff --git a/src/ServiceStack.Redis/RedisResolver.cs b/src/ServiceStack.Redis/RedisResolver.cs index bbd149e7..a49e0f01 100644 --- a/src/ServiceStack.Redis/RedisResolver.cs +++ b/src/ServiceStack.Redis/RedisResolver.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using System.IO; using System.Linq; using System.Threading; using ServiceStack.Logging; @@ -19,28 +20,22 @@ public class RedisResolver : IRedisResolver, IRedisResolverExtended HashSet allHosts = new HashSet(); private RedisEndpoint[] masters; - private RedisEndpoint[] slaves; + private RedisEndpoint[] replicas; - public RedisEndpoint[] Masters - { - get { return masters; } - } + public RedisEndpoint[] Masters => masters; - public RedisEndpoint[] Slaves - { - get { return slaves; } - } + public RedisEndpoint[] Slaves => replicas; public RedisResolver() : this(TypeConstants.EmptyArray, TypeConstants.EmptyArray) {} - public RedisResolver(IEnumerable masters, IEnumerable slaves) - : this(masters.ToRedisEndPoints(), slaves.ToRedisEndPoints()){} + public RedisResolver(IEnumerable masters, IEnumerable replicas) + : this(masters.ToRedisEndPoints(), 
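The SleepBackOffMultiplier() used by the pub/sub server's retry path draws a randomized delay from a cubic window, capped at one minute. Isolated, the calculation is:

    using System;

    static class BackOffSketch
    {
        static readonly Random rand = new Random();

        // Sleep between n^3 and (n+1)^3 milliseconds after n continuous errors, capped at 60s.
        public static int NextDelayMs(int continuousErrorsCount)
        {
            if (continuousErrorsCount == 0)
                return 0;

            const int maxSleepMs = 60 * 1000;
            return Math.Min(
                rand.Next((int)Math.Pow(continuousErrorsCount, 3),
                          (int)Math.Pow(continuousErrorsCount + 1, 3) + 1),
                maxSleepMs);
        }
    }
    // e.g. after 3 continuous errors the delay falls in [27, 64] ms;
    // by roughly 40 consecutive errors it saturates at the 60 second cap.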
replicas.ToRedisEndPoints()){} - public RedisResolver(IEnumerable masters, IEnumerable slaves) + public RedisResolver(IEnumerable masters, IEnumerable replicas) { ResetMasters(masters.ToList()); - ResetSlaves(slaves.ToList()); + ResetSlaves(replicas.ToList()); ClientFactory = RedisConfig.ClientFactory; } @@ -67,14 +62,14 @@ public virtual void ResetSlaves(IEnumerable hosts) ResetSlaves(hosts.ToRedisEndPoints()); } - public virtual void ResetSlaves(List newSlaves) + public virtual void ResetSlaves(List newReplicas) { - slaves = (newSlaves ?? new List()).ToArray(); - ReadOnlyHostsCount = slaves.Length; - newSlaves.Each(x => allHosts.Add(x)); + replicas = (newReplicas ?? new List()).ToArray(); + ReadOnlyHostsCount = replicas.Length; + newReplicas.Each(x => allHosts.Add(x)); if (log.IsDebugEnabled) - log.Debug("New Redis Slaves: " + string.Join(", ", slaves.Map(x => x.GetHostString()))); + log.Debug("New Redis Replicas: " + string.Join(", ", replicas.Map(x => x.GetHostString()))); } public virtual RedisClient CreateRedisClient(RedisEndpoint config, bool master) @@ -83,51 +78,79 @@ public virtual RedisClient CreateRedisClient(RedisEndpoint config, bool master) if (master && RedisConfig.VerifyMasterConnections) { - var role = client.GetServerRole(); - if (role != RedisServerRole.Master) + var firstAttempt = DateTime.UtcNow; + Exception firstEx = null; + var retryTimeSpan = TimeSpan.FromMilliseconds(config.RetryTimeout); + var i = 0; + while (DateTime.UtcNow - firstAttempt < retryTimeSpan) { - Interlocked.Increment(ref RedisState.TotalInvalidMasters); - log.Error("Redis Master Host '{0}' is {1}. Resetting allHosts...".Fmt(config.GetHostString(), role)); - var newMasters = new List(); - var newSlaves = new List(); - RedisClient masterClient = null; - foreach (var hostConfig in allHosts) + try { - try - { - var testClient = ClientFactory(hostConfig); - testClient.ConnectTimeout = RedisConfig.HostLookupTimeoutMs; - var testRole = testClient.GetServerRole(); - switch (testRole) - { - case RedisServerRole.Master: - newMasters.Add(hostConfig); - if (masterClient == null) - masterClient = testClient; - break; - case RedisServerRole.Slave: - newSlaves.Add(hostConfig); - break; - } - - } - catch { /* skip */ } + client = GetValidMaster(client, config); + return client; + } + catch (Exception ex) + { + if (!RedisConfig.RetryReconnectOnFailedMasters) + throw; + + firstEx ??= ex; + ExecUtils.SleepBackOffMultiplier(++i); + client?.Dispose(); + client = ClientFactory(config); } + } + throw new TimeoutException($"Could not resolve master instance within {config.RetryTimeout}ms RetryTimeout", firstEx); + } + + return client; + } - if (masterClient == null) + protected RedisClient GetValidMaster(RedisClient client, RedisEndpoint config) + { + var role = client.GetServerRole(); + if (role != RedisServerRole.Master) + { + Interlocked.Increment(ref RedisState.TotalInvalidMasters); + log.Error("Redis Master Host '{0}' is {1}. 
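When VerifyMasterConnections is enabled, CreateRedisClient() keeps re-checking the connected instance's reported role until a master answers or the endpoint's RetryTimeout elapses. The shape of that loop, reduced to a helper; the factory delegate and fixed 100ms pause are illustrative:

    using System;
    using System.Threading;
    using ServiceStack.Redis;

    static class VerifyMasterSketch
    {
        public static RedisClient GetVerifiedMaster(Func<RedisClient> clientFactory, TimeSpan retryTimeout)
        {
            var firstAttempt = DateTime.UtcNow;
            Exception firstEx = null;

            while (DateTime.UtcNow - firstAttempt < retryTimeout)
            {
                RedisClient client = null;
                try
                {
                    client = clientFactory();
                    if (client.GetServerRole() == RedisServerRole.Master)
                        return client;

                    client.Dispose();           // a replica answered; pause, then retry
                }
                catch (Exception ex)
                {
                    firstEx ??= ex;
                    client?.Dispose();
                }
                Thread.Sleep(100);
            }

            throw new TimeoutException($"Could not resolve master instance within {retryTimeout}", firstEx);
        }
    }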
Resetting allHosts...".Fmt(config.GetHostString(), role)); + var newMasters = new List(); + var newReplicas = new List(); + RedisClient masterClient = null; + foreach (var hostConfig in allHosts) + { + try { - Interlocked.Increment(ref RedisState.TotalNoMastersFound); - var errorMsg = "No master found in: " + string.Join(", ", allHosts.Map(x => x.GetHostString())); - log.Error(errorMsg); - throw new Exception(errorMsg); + var testClient = ClientFactory(hostConfig); + testClient.ConnectTimeout = RedisConfig.HostLookupTimeoutMs; + var testRole = testClient.GetServerRole(); + switch (testRole) + { + case RedisServerRole.Master: + newMasters.Add(hostConfig); + if (masterClient == null) + masterClient = testClient; + break; + case RedisServerRole.Slave: + newReplicas.Add(hostConfig); + break; + } + } + catch { /* skip */ } + } - ResetMasters(newMasters); - ResetSlaves(newSlaves); - return masterClient; + if (masterClient == null) + { + Interlocked.Increment(ref RedisState.TotalNoMastersFound); + var errorMsg = "No master found in: " + string.Join(", ", allHosts.Map(x => x.GetHostString())); + log.Error(errorMsg); + throw new InvalidDataException(errorMsg); } - } + ResetMasters(newMasters); + ResetSlaves(newReplicas); + return masterClient; + } return client; } @@ -139,7 +162,7 @@ public RedisEndpoint GetReadWriteHost(int desiredIndex) public RedisEndpoint GetReadOnlyHost(int desiredIndex) { return ReadOnlyHostsCount > 0 - ? slaves[desiredIndex % slaves.Length] + ? replicas[desiredIndex % replicas.Length] : GetReadWriteHost(desiredIndex); } diff --git a/src/ServiceStack.Redis/RedisResponseException.cs b/src/ServiceStack.Redis/RedisResponseException.cs index 00e5c537..c7dba956 100644 --- a/src/ServiceStack.Redis/RedisResponseException.cs +++ b/src/ServiceStack.Redis/RedisResponseException.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // diff --git a/src/ServiceStack.Redis/RedisScripts.cs b/src/ServiceStack.Redis/RedisScripts.cs new file mode 100644 index 00000000..7def2bd9 --- /dev/null +++ b/src/ServiceStack.Redis/RedisScripts.cs @@ -0,0 +1,287 @@ +using System; +using System.Collections; +using System.Linq; +using System.Collections.Generic; +using ServiceStack.Script; + +namespace ServiceStack.Redis +{ + public class RedisSearchCursorResult + { + public int Cursor { get; set; } + public List Results { get; set; } + } + + public class RedisSearchResult + { + public string Id { get; set; } + public string Type { get; set; } + public long Ttl { get; set; } + public long Size { get; set; } + } + + [Obsolete("Use RedisScripts")] + public class TemplateRedisFilters : RedisScripts {} + + public class RedisScripts : ScriptMethods + { + private const string RedisConnection = "__redisConnection"; + + private IRedisClientsManager redisManager; + public IRedisClientsManager RedisManager + { + get => redisManager ?? 
(redisManager = Context.Container.Resolve()); + set => redisManager = value; + } + + T exec(Func fn, ScriptScopeContext scope, object options) + { + try + { + if ((options is Dictionary obj && obj.TryGetValue("connectionString", out var oRedisConn)) + || scope.PageResult.Args.TryGetValue(RedisConnection, out oRedisConn)) + { + using (var redis = new RedisClient((string)oRedisConn)) + { + return fn(redis); + } + } + + using (var redis = RedisManager.GetClient()) + { + return fn(redis); + } + } + catch (Exception ex) + { + throw new StopFilterExecutionException(scope, options, ex); + } + } + + public IgnoreResult useRedis(ScriptScopeContext scope, string redisConnection) + { + if (redisConnection == null) + scope.PageResult.Args.Remove(RedisConnection); + else + scope.PageResult.Args[RedisConnection] = redisConnection; + + return IgnoreResult.Value; + } + + static readonly Dictionary cmdArgCounts = new Dictionary(StringComparer.OrdinalIgnoreCase) { + { "SET", 3 } + }; + + List parseCommandString(string cmd) + { + var args = new List(); + var lastPos = 0; + for (var i = 0; i < cmd.Length; i++) + { + var c = cmd[i]; + if (c == '{' || c == '[') + { + break; //stop splitting args if value is complex type + } + if (c == ' ') + { + var arg = cmd.Substring(lastPos, i - lastPos); + args.Add(arg); + lastPos = i + 1; + + //if we've reached the command args count, capture the rest of the body as the last arg + if (cmdArgCounts.TryGetValue(args[0], out int argCount) && args.Count == argCount - 1) + break; + } + } + args.Add(cmd.Substring(lastPos)); + return args; + } + + object toObject(RedisText r) + { + if (r == null) + return null; + + if (r.Children != null && r.Children.Count > 0) + { + var to = new List(); + for (var i = 0; i < r.Children.Count; i++) + { + var child = r.Children[i]; + var value = child.Text ?? 
toObject(child); + to.Add(value); + } + return to; + } + return r.Text; + } + + public object redisCall(ScriptScopeContext scope, object redisCommand) => redisCall(scope, redisCommand, null); + public object redisCall(ScriptScopeContext scope, object redisCommand, object options) + { + if (redisCommand == null) + return null; + + List args; + if (redisCommand is string cmd) + { + if (string.IsNullOrEmpty(cmd)) + return null; + + args = parseCommandString(cmd); + } + else if (redisCommand is IEnumerable e && !(e is IDictionary)) + { + args = new List(); + foreach (var arg in e) + { + if (arg == null) continue; + args.Add(arg.ToString()); + } + } + else + throw new NotSupportedException($"redisCall expects a string or an object args but received a {redisCommand.GetType().Name} instead."); + + var objParams = args.Select(x => (object)x).ToArray(); + var redisText = exec(r => r.Custom(objParams), scope, options); + var result = toObject(redisText); + return result; + } + + public List redisSearchKeys(ScriptScopeContext scope, string query) => redisSearchKeys(scope, query, null); + public List redisSearchKeys(ScriptScopeContext scope, string query, object options) + { + var json = redisSearchKeysAsJson(scope, query, options); + const string noResult = "{\"cursor\":0,\"results\":{}}"; + if (json == noResult) + return new List(); + + var searchResults = json.FromJson(); + return searchResults.Results; + } + + public Dictionary redisInfo(ScriptScopeContext scope) => redisInfo(scope, null); + public Dictionary redisInfo(ScriptScopeContext scope, object options) => exec(r => r.Info, scope, options); + + public string redisConnectionString(ScriptScopeContext scope) => exec(r => $"{r.Host}:{r.Port}?db={r.Db}", scope, null); + + public Dictionary redisConnection(ScriptScopeContext scope) => exec(r => new Dictionary + { + { "host", r.Host }, + { "port", r.Port }, + { "db", r.Db }, + }, scope, null); + + public string redisToConnectionString(ScriptScopeContext scope, object connectionInfo) => redisToConnectionString(scope, connectionInfo, null); + public string redisToConnectionString(ScriptScopeContext scope, object connectionInfo, object options) + { + var connectionString = connectionInfo as string; + if (connectionString != null) + return connectionString; + + if (connectionInfo is IDictionary d) + { + var host = (d.TryGetValue("host", out object h) ? h as string : null) ?? "localhost"; + var port = d.TryGetValue("port", out object p) ? DynamicInt.Instance.ConvertFrom(p) : 6379; + var db = d.TryGetValue("db", out object oDb) ? DynamicInt.Instance.ConvertFrom(oDb) : 0; + + connectionString = $"{host}:{port}?db={db}"; + + if (d.TryGetValue("password", out object password)) + connectionString += "&password=" + password.ToString().UrlEncode(); + } + + return connectionString; + } + + public string redisChangeConnection(ScriptScopeContext scope, object newConnection) => redisChangeConnection(scope, newConnection, null); + public string redisChangeConnection(ScriptScopeContext scope, object newConnection, object options) + { + try + { + var connectionString = redisToConnectionString(scope, newConnection, options); + if (connectionString == null) + throw new NotSupportedException(nameof(redisChangeConnection) + " expects a String or an ObjectDictionary but received: " + (newConnection?.GetType().Name ?? 
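redisToConnectionString() accepts either a connection string or an object dictionary; the dictionary form collapses host/port/db (plus an optional password) into the host:port?db=N style string the client parses. A standalone equivalent:

    using System;
    using System.Collections.Generic;

    static class ConnectionStringSketch
    {
        public static string ToConnectionString(IDictionary<string, object> d)
        {
            var host = d.TryGetValue("host", out var h) ? (string)h : "localhost";
            var port = d.TryGetValue("port", out var p) ? Convert.ToInt32(p) : 6379;
            var db   = d.TryGetValue("db", out var oDb) ? Convert.ToInt32(oDb) : 0;

            var connectionString = $"{host}:{port}?db={db}";
            if (d.TryGetValue("password", out var password))
                connectionString += "&password=" + Uri.EscapeDataString(password.ToString());

            return connectionString;
        }
    }
    // e.g. { ["host"]="10.0.0.5", ["port"]=6380, ["db"]=1 } => "10.0.0.5:6380?db=1"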
"null")); + + using (var testConnection = new RedisClient(connectionString)) + { + testConnection.Ping(); + } + + ((IRedisFailover)RedisManager).FailoverTo(connectionString); + + return connectionString; + } + catch (Exception ex) + { + throw new StopFilterExecutionException(scope, options ?? newConnection as IDictionary, ex); + } + } + + public string redisSearchKeysAsJson(ScriptScopeContext scope, string query, object options) + { + if (string.IsNullOrEmpty(query)) + return null; + + try + { + var args = scope.AssertOptions(nameof(redisSearchKeys), options); + var limit = args.TryGetValue("limit", out object value) + ? value.ConvertTo() + : scope.GetValue("redis.search.limit") ?? 100; + + const string LuaScript = @" +local limit = tonumber(ARGV[2]) +local pattern = ARGV[1] +local cursor = tonumber(ARGV[3]) +local len = 0 +local keys = {} +repeat + local r = redis.call('scan', cursor, 'MATCH', pattern, 'COUNT', limit) + cursor = tonumber(r[1]) + for k,v in ipairs(r[2]) do + table.insert(keys, v) + len = len + 1 + if len == limit then break end + end +until cursor == 0 or len == limit +local cursorAttrs = {['cursor'] = cursor, ['results'] = {}} +if len == 0 then + return cjson.encode(cursorAttrs) +end + +local keyAttrs = {} +for i,key in ipairs(keys) do + local type = redis.call('type', key)['ok'] + local pttl = redis.call('pttl', key) + local size = 0 + if type == 'string' then + size = redis.call('strlen', key) + elseif type == 'list' then + size = redis.call('llen', key) + elseif type == 'set' then + size = redis.call('scard', key) + elseif type == 'zset' then + size = redis.call('zcard', key) + elseif type == 'hash' then + size = redis.call('hlen', key) + end + local attrs = {['id'] = key, ['type'] = type, ['ttl'] = pttl, ['size'] = size, ['foo'] = 'bar'} + table.insert(keyAttrs, attrs) +end +cursorAttrs['results'] = keyAttrs +return cjson.encode(cursorAttrs)"; + + var json = exec(r => r.ExecCachedLua(LuaScript, sha1 => + r.ExecLuaShaAsString(sha1, query, limit.ToString(), "0")), scope, options); + + return json; + } + catch (Exception ex) + { + throw new StopFilterExecutionException(scope, options, ex); + } + } + } +} diff --git a/src/ServiceStack.Redis/RedisSentinel.cs b/src/ServiceStack.Redis/RedisSentinel.cs index c5057439..bdac92c8 100644 --- a/src/ServiceStack.Redis/RedisSentinel.cs +++ b/src/ServiceStack.Redis/RedisSentinel.cs @@ -2,7 +2,7 @@ // Redis Sentinel will connect to a Redis Sentinel Instance and create an IRedisClientsManager based off of the first sentinel that returns data // // Upon failure of a sentinel, other sentinels will be attempted to be connected to -// Upon a s_down event, the RedisClientsManager will be failed over to the new set of slaves/masters +// Upon a s_down event, the RedisClientsManager will be failed over to the new set of masters/replicas // using System; @@ -23,14 +23,11 @@ public class RedisSentinel : IRedisSentinel public static string DefaultMasterName = "mymaster"; public static string DefaultAddress = "127.0.0.1:26379"; - private object oLock = new object(); + private readonly object oLock = new object(); private bool isDisposed = false; private readonly string masterName; - public string MasterName - { - get { return masterName; } - } + public string MasterName => masterName; private int failures = 0; private int sentinelIndex = -1; @@ -45,10 +42,15 @@ public string MasterName public Func RedisManagerFactory { get; set; } /// - /// Configure the Redis Connection String to use for a Redis Client Host + /// Configure the Redis Connection 
String to use for a Redis Instance Host /// public Func HostFilter { get; set; } + /// + /// Configure the Redis Connection String to use for a Redis Sentinel Host + /// + public Func SentinelHostFilter { get; set; } + /// /// The configured Redis Client Manager this Sentinel managers /// @@ -77,80 +79,70 @@ public string MasterName /// /// Whether to routinely scan for other sentinel hosts (default true) /// - public bool ScanForOtherSentinels { get; set; } + public bool ScanForOtherSentinels { get; set; } = true; /// /// What interval to scan for other sentinel hosts (default 10 mins) /// - public TimeSpan RefreshSentinelHostsAfter { get; set; } + public TimeSpan RefreshSentinelHostsAfter { get; set; } = TimeSpan.FromMinutes(10); private DateTime lastSentinelsRefresh; /// /// How long to wait after failing before connecting to next redis instance (default 250ms) /// - public TimeSpan WaitBetweenFailedHosts { get; set; } + public TimeSpan WaitBetweenFailedHosts { get; set; } = TimeSpan.FromMilliseconds(250); /// /// How long to retry connecting to hosts before throwing (default 60 secs) /// - public TimeSpan MaxWaitBetweenFailedHosts { get; set; } + public TimeSpan MaxWaitBetweenFailedHosts { get; set; } = TimeSpan.FromSeconds(60); /// /// How long to wait after consecutive failed connection attempts to master before forcing /// a Sentinel to failover the current master (default 60 secs) /// - public TimeSpan WaitBeforeForcingMasterFailover { get; set; } + public TimeSpan WaitBeforeForcingMasterFailover { get; set; } = TimeSpan.FromSeconds(60); /// - /// The Max Connection time for Sentinel Worker (default 100ms) + /// The Max Connection time for Sentinel Worker (default 250ms) /// - public int SentinelWorkerConnectTimeoutMs { get; set; } + public int SentinelWorkerConnectTimeoutMs { get; set; } = 250; /// - /// The Max TCP Socket Receive time for Sentinel Worker (default 100ms) + /// The Max TCP Socket Receive time for Sentinel Worker (default 250ms) /// - public int SentinelWorkerReceiveTimeoutMs { get; set; } + public int SentinelWorkerReceiveTimeoutMs { get; set; } = 250; /// - /// The Max TCP Socket Send time for Sentinel Worker (default 100ms) + /// The Max TCP Socket Send time for Sentinel Worker (default 250ms) /// - public int SentinelWorkerSendTimeoutMs { get; set; } + public int SentinelWorkerSendTimeoutMs { get; set; } = 250; /// /// Reset client connections when Sentinel reports redis instance is subjectively down (default true) /// - public bool ResetWhenSubjectivelyDown { get; set; } + public bool ResetWhenSubjectivelyDown { get; set; } = true; /// /// Reset client connections when Sentinel reports redis instance is objectively down (default true) /// - public bool ResetWhenObjectivelyDown { get; set; } + public bool ResetWhenObjectivelyDown { get; set; } = true; + + internal string DebugId => $""; public RedisSentinel(string sentinelHost = null, string masterName = null) : this(new[] { sentinelHost ?? DefaultAddress }, masterName ?? DefaultMasterName) { } public RedisSentinel(IEnumerable sentinelHosts, string masterName = null) { - this.SentinelHosts = sentinelHosts != null - ? sentinelHosts.ToList() - : null; + this.SentinelHosts = sentinelHosts?.ToList(); if (SentinelHosts == null || SentinelHosts.Count == 0) throw new ArgumentException("sentinels must have at least one entry"); this.masterName = masterName ?? 
DefaultMasterName; IpAddressMap = new Dictionary(); - RedisManagerFactory = (masters, slaves) => new PooledRedisClientManager(masters, slaves); - ScanForOtherSentinels = true; - RefreshSentinelHostsAfter = TimeSpan.FromMinutes(10); - ResetWhenObjectivelyDown = true; - ResetWhenSubjectivelyDown = true; - SentinelWorkerConnectTimeoutMs = 100; - SentinelWorkerReceiveTimeoutMs = 100; - SentinelWorkerSendTimeoutMs = 100; - WaitBetweenFailedHosts = TimeSpan.FromMilliseconds(250); - MaxWaitBetweenFailedHosts = TimeSpan.FromSeconds(60); - WaitBeforeForcingMasterFailover = TimeSpan.FromSeconds(60); + RedisManagerFactory = (masters, replicas) => new PooledRedisClientManager(masters, replicas); } /// @@ -165,7 +157,7 @@ public IRedisClientsManager Start() var parts = SentinelHosts[i].SplitOnLast(':'); if (parts.Length == 1) { - SentinelHosts[i] = parts[0] + ":{0}".Fmt(RedisConfig.DefaultPortSentinel); + SentinelHosts[i] = parts[0] + ":" + RedisConfig.DefaultPortSentinel; } } @@ -198,15 +190,18 @@ public List GetActiveSentinelHosts(IEnumerable sentinelHosts) var endpoint = sentinelHost.ToRedisEndpoint(defaultPort: RedisConfig.DefaultPortSentinel); using (var sentinelWorker = new RedisSentinelWorker(this, endpoint)) { - var activeHosts = sentinelWorker.GetSentinelHosts(MasterName); - if (!activeSentinelHosts.Contains(sentinelHost)) activeSentinelHosts.Add(sentinelHost); + var activeHosts = sentinelWorker.GetSentinelHosts(MasterName); foreach (var activeHost in activeHosts) { if (!activeSentinelHosts.Contains(activeHost)) - activeSentinelHosts.Add(activeHost); + { + activeSentinelHosts.Add(SentinelHostFilter != null + ? SentinelHostFilter(activeHost) + : activeHost); + } } } @@ -259,14 +254,14 @@ public SentinelInfo ResetClients() if (RedisManager == null) { if (Log.IsDebugEnabled) - Log.Debug("Configuring initial Redis Clients: {0}".Fmt(sentinelInfo)); + Log.Debug($"Configuring initial Redis Clients: {sentinelInfo}"); RedisManager = CreateRedisManager(sentinelInfo); } else { if (Log.IsDebugEnabled) - Log.Debug("Failing over to Redis Clients: {0}".Fmt(sentinelInfo)); + Log.Debug($"Failing over to Redis Clients: {sentinelInfo}"); ((IRedisFailover)RedisManager).FailoverTo( ConfigureHosts(sentinelInfo.RedisMasters), @@ -279,24 +274,21 @@ public SentinelInfo ResetClients() private IRedisClientsManager CreateRedisManager(SentinelInfo sentinelInfo) { var masters = ConfigureHosts(sentinelInfo.RedisMasters); - var slaves = ConfigureHosts(sentinelInfo.RedisSlaves); - var redisManager = RedisManagerFactory(masters, slaves); + var replicas = ConfigureHosts(sentinelInfo.RedisSlaves); + var redisManager = RedisManagerFactory(masters, replicas); var hasRedisResolver = (IHasRedisResolver)redisManager; - hasRedisResolver.RedisResolver = new RedisSentinelResolver(this, masters, slaves); + hasRedisResolver.RedisResolver = new RedisSentinelResolver(this, masters, replicas); - var canFailover = redisManager as IRedisFailover; - if (canFailover != null && this.OnFailover != null) + if (redisManager is IRedisFailover canFailover && this.OnFailover != null) { canFailover.OnFailover.Add(this.OnFailover); } return redisManager; } - public IRedisClientsManager GetRedisManager() - { - return RedisManager ?? 
(RedisManager = CreateRedisManager(GetSentinelInfo())); - } + public IRedisClientsManager GetRedisManager() => + RedisManager ??= CreateRedisManager(GetSentinelInfo()); private RedisSentinelWorker GetValidSentinelWorker() { @@ -310,16 +302,31 @@ private RedisSentinelWorker GetValidSentinelWorker() while (this.worker == null && ShouldRetry()) { + var step = 0; try { this.worker = GetNextSentinel(); + step = 1; GetRedisManager(); + + step = 2; this.worker.BeginListeningForConfigurationChanges(); this.failures = 0; //reset return this.worker; } catch (RedisException ex) { + if (Log.IsDebugEnabled) + { + var name = step switch { + 0 => "GetNextSentinel()", + 1 => "GetRedisManager()", + 2 => "BeginListeningForConfigurationChanges()", + _ => $"Step {step}", + }; + Log.Debug($"Failed to {name}: {ex.Message}"); + } + if (OnWorkerError != null) OnWorkerError(ex); @@ -338,29 +345,23 @@ private RedisSentinelWorker GetValidSentinelWorker() public RedisEndpoint GetMaster() { var sentinelWorker = GetValidSentinelWorker(); - lock (sentinelWorker) - { - var host = sentinelWorker.GetMasterHost(masterName); - - if (ScanForOtherSentinels && DateTime.UtcNow - lastSentinelsRefresh > RefreshSentinelHostsAfter) - { - RefreshActiveSentinels(); - } + var host = sentinelWorker.GetMasterHost(masterName); - return host != null - ? (HostFilter != null ? HostFilter(host) : host).ToRedisEndpoint() - : null; + if (ScanForOtherSentinels && DateTime.UtcNow - lastSentinelsRefresh > RefreshSentinelHostsAfter) + { + RefreshActiveSentinels(); } + + return host != null + ? (HostFilter != null ? HostFilter(host) : host).ToRedisEndpoint() + : null; } public List GetSlaves() { var sentinelWorker = GetValidSentinelWorker(); - lock (sentinelWorker) - { - var hosts = sentinelWorker.GetSlaveHosts(masterName); - return ConfigureHosts(hosts).Map(x => x.ToRedisEndpoint()); - } + var hosts = sentinelWorker.GetReplicaHosts(masterName); + return ConfigureHosts(hosts).Map(x => x.ToRedisEndpoint()); } /// @@ -375,23 +376,35 @@ private bool ShouldRetry() private RedisSentinelWorker GetNextSentinel() { - lock (oLock) + RedisSentinelWorker disposeWorker = null; + + try { - if (this.worker != null) + lock (oLock) { - this.worker.Dispose(); - this.worker = null; - } + if (this.worker != null) + { + disposeWorker = this.worker; + this.worker = null; + } - if (++sentinelIndex >= SentinelEndpoints.Length) - sentinelIndex = 0; + if (++sentinelIndex >= SentinelEndpoints.Length) + sentinelIndex = 0; + + if (Log.IsDebugEnabled) + Log.Debug($"Attempt to connect to next sentinel '{SentinelEndpoints[sentinelIndex]}'..."); - var sentinelWorker = new RedisSentinelWorker(this, SentinelEndpoints[sentinelIndex]) - { - OnSentinelError = OnSentinelError - }; + var sentinelWorker = new RedisSentinelWorker(this, SentinelEndpoints[sentinelIndex]) + { + OnSentinelError = OnSentinelError + }; - return sentinelWorker; + return sentinelWorker; + } + } + finally + { + disposeWorker?.Dispose(); } } @@ -412,19 +425,13 @@ private void OnSentinelError(Exception ex) public void ForceMasterFailover() { var sentinelWorker = GetValidSentinelWorker(); - lock (sentinelWorker) - { - sentinelWorker.ForceMasterFailover(masterName); - } + sentinelWorker.ForceMasterFailover(masterName); } public SentinelInfo GetSentinelInfo() { var sentinelWorker = GetValidSentinelWorker(); - lock (sentinelWorker) - { - return sentinelWorker.GetSentinelInfo(); - } + return sentinelWorker.GetSentinelInfo(); } public void Dispose() @@ -442,18 +449,15 @@ public class SentinelInfo public string[] 
RedisMasters { get; set; } public string[] RedisSlaves { get; set; } - public SentinelInfo(string masterName, IEnumerable redisMasters, IEnumerable redisSlaves) + public SentinelInfo(string masterName, IEnumerable redisMasters, IEnumerable redisReplicas) { MasterName = masterName; - RedisMasters = redisMasters != null ? redisMasters.ToArray() : TypeConstants.EmptyStringArray; - RedisSlaves = redisSlaves != null ? redisSlaves.ToArray() : TypeConstants.EmptyStringArray; + RedisMasters = redisMasters?.ToArray() ?? TypeConstants.EmptyStringArray; + RedisSlaves = redisReplicas?.ToArray() ?? TypeConstants.EmptyStringArray; } public override string ToString() { - return "{0} masters: {1}, slaves: {2}".Fmt( - MasterName, - string.Join(", ", RedisMasters), - string.Join(", ", RedisSlaves)); + return $"{MasterName} primary: {string.Join(", ", RedisMasters)}, replicas: {string.Join(", ", RedisSlaves)}"; } } diff --git a/src/ServiceStack.Redis/RedisSentinelResolver.cs b/src/ServiceStack.Redis/RedisSentinelResolver.cs index 6cbc728e..e0baf03a 100644 --- a/src/ServiceStack.Redis/RedisSentinelResolver.cs +++ b/src/ServiceStack.Redis/RedisSentinelResolver.cs @@ -23,28 +23,22 @@ public class RedisSentinelResolver : IRedisResolver, IRedisResolverExtended private RedisSentinel sentinel; private RedisEndpoint[] masters; - private RedisEndpoint[] slaves; + private RedisEndpoint[] replicas; - public RedisEndpoint[] Masters - { - get { return masters; } - } - public RedisEndpoint[] Slaves - { - get { return slaves; } - } + public RedisEndpoint[] Masters => masters; + public RedisEndpoint[] Slaves => replicas; public RedisSentinelResolver(RedisSentinel sentinel) : this(sentinel, TypeConstants.EmptyArray, TypeConstants.EmptyArray) { } - public RedisSentinelResolver(RedisSentinel sentinel, IEnumerable masters, IEnumerable slaves) - : this(sentinel, masters.ToRedisEndPoints(), slaves.ToRedisEndPoints()) { } + public RedisSentinelResolver(RedisSentinel sentinel, IEnumerable masters, IEnumerable replicas) + : this(sentinel, masters.ToRedisEndPoints(), replicas.ToRedisEndPoints()) { } - public RedisSentinelResolver(RedisSentinel sentinel, IEnumerable masters, IEnumerable slaves) + public RedisSentinelResolver(RedisSentinel sentinel, IEnumerable masters, IEnumerable replicas) { this.sentinel = sentinel; ResetMasters(masters.ToList()); - ResetSlaves(slaves.ToList()); + ResetSlaves(replicas.ToList()); ClientFactory = RedisConfig.ClientFactory; } @@ -71,14 +65,14 @@ public virtual void ResetSlaves(IEnumerable hosts) ResetSlaves(hosts.ToRedisEndPoints()); } - public virtual void ResetSlaves(List newSlaves) + public virtual void ResetSlaves(List newReplicas) { - slaves = (newSlaves ?? new List()).ToArray(); - ReadOnlyHostsCount = slaves.Length; - newSlaves.Each(x => allHosts.Add(x)); + replicas = (newReplicas ?? 
new List()).ToArray(); + ReadOnlyHostsCount = replicas.Length; + newReplicas.Each(x => allHosts.Add(x)); if (log.IsDebugEnabled) - log.Debug("New Redis Slaves: " + string.Join(", ", slaves.Map(x => x.GetHostString()))); + log.Debug("New Redis Replicas: " + string.Join(", ", replicas.Map(x => x.GetHostString()))); } public RedisEndpoint GetReadWriteHost(int desiredIndex) @@ -88,12 +82,12 @@ public RedisEndpoint GetReadWriteHost(int desiredIndex) public RedisEndpoint GetReadOnlyHost(int desiredIndex) { - var slavesEndpoints = sentinel.GetSlaves(); - if (slavesEndpoints.Count > 0) - return slavesEndpoints[desiredIndex % slavesEndpoints.Count]; + var replicaEndpoints = sentinel.GetSlaves(); + if (replicaEndpoints.Count > 0) + return replicaEndpoints[desiredIndex % replicaEndpoints.Count]; return ReadOnlyHostsCount > 0 - ? slaves[desiredIndex % slaves.Length] + ? replicas[desiredIndex % replicas.Length] : GetReadWriteHost(desiredIndex); } @@ -113,8 +107,8 @@ public RedisClient CreateSlaveClient(int desiredIndex) private DateTime lastValidMasterFromSentinelAt { - get { return new DateTime(Interlocked.Read(ref lastValidMasterTicks), DateTimeKind.Utc); } - set { Interlocked.Exchange(ref lastValidMasterTicks, value.Ticks); } + get => new DateTime(Interlocked.Read(ref lastValidMasterTicks), DateTimeKind.Utc); + set => Interlocked.Exchange(ref lastValidMasterTicks, value.Ticks); } public virtual RedisClient CreateRedisClient(RedisEndpoint config, bool master) @@ -200,7 +194,7 @@ public virtual RedisClient CreateRedisClient(RedisEndpoint config, bool master) log.Error("Redis Master Host '{0}' is {1}. Resetting allHosts...".Fmt(config.GetHostString(), role), ex); var newMasters = new List(); - var newSlaves = new List(); + var newReplicas = new List(); RedisClient masterClient = null; foreach (var hostConfig in allHosts) { @@ -217,7 +211,7 @@ public virtual RedisClient CreateRedisClient(RedisEndpoint config, bool master) masterClient = testClient; break; case RedisServerRole.Slave: - newSlaves.Add(hostConfig); + newReplicas.Add(hostConfig); break; } @@ -234,7 +228,7 @@ public virtual RedisClient CreateRedisClient(RedisEndpoint config, bool master) } ResetMasters(newMasters); - ResetSlaves(newSlaves); + ResetSlaves(newReplicas); return masterClient; } } diff --git a/src/ServiceStack.Redis/RedisSentinelWorker.cs b/src/ServiceStack.Redis/RedisSentinelWorker.cs index 5671a722..978ebf37 100644 --- a/src/ServiceStack.Redis/RedisSentinelWorker.cs +++ b/src/ServiceStack.Redis/RedisSentinelWorker.cs @@ -9,15 +9,23 @@ internal class RedisSentinelWorker : IDisposable { protected static readonly ILog Log = LogManager.GetLogger(typeof(RedisSentinelWorker)); + static int IdCounter = 0; + public int Id { get; } + + private readonly object oLock = new(); + + private readonly RedisEndpoint sentinelEndpoint; private readonly RedisSentinel sentinel; private readonly RedisClient sentinelClient; - private RedisPubSubServer sentinePubSub; + private RedisPubSubServer sentinelPubSub; public Action OnSentinelError; public RedisSentinelWorker(RedisSentinel sentinel, RedisEndpoint sentinelEndpoint) { + this.Id = Interlocked.Increment(ref IdCounter); this.sentinel = sentinel; + this.sentinelEndpoint = sentinelEndpoint; this.sentinelClient = new RedisClient(sentinelEndpoint) { Db = 0, //Sentinel Servers doesn't support DB, reset to 0 ConnectTimeout = sentinel.SentinelWorkerConnectTimeoutMs, @@ -26,7 +34,7 @@ public RedisSentinelWorker(RedisSentinel sentinel, RedisEndpoint sentinelEndpoin }; if (Log.IsDebugEnabled) - Log.Debug("Set 
up Redis Sentinel on {0}".Fmt(sentinelEndpoint)); + Log.Debug($"Set up Redis Sentinel on {sentinelEndpoint}"); } /// @@ -37,7 +45,7 @@ public RedisSentinelWorker(RedisSentinel sentinel, RedisEndpoint sentinelEndpoin private void SentinelMessageReceived(string channel, string message) { if (Log.IsDebugEnabled) - Log.Debug("Received '{0}' on channel '{1}' from Sentinel".Fmt(channel, message)); + Log.Debug($"Received '{channel}' on channel '{message}' from Sentinel"); // {+|-}sdown is the event for server coming up or down var c = channel.ToLower(); @@ -55,7 +63,7 @@ private void SentinelMessageReceived(string channel, string message) || (sentinel.ResetWhenObjectivelyDown && isObjectivelyDown)) { if (Log.IsDebugEnabled) - Log.Debug("Sentinel detected server down/up '{0}' with message: {1}".Fmt(channel, message)); + Log.Debug($"Sentinel detected server down/up '{channel}' with message: {message}"); sentinel.ResetClients(); } @@ -73,7 +81,7 @@ internal SentinelInfo GetSentinelInfo() var sentinelInfo = new SentinelInfo( sentinel.MasterName, new[] { masterHost }, - GetSlaveHosts(sentinel.MasterName)); + GetReplicaHosts(sentinel.MasterName)); return sentinelInfo; } @@ -95,7 +103,10 @@ internal string GetMasterHost(string masterName) private string GetMasterHostInternal(string masterName) { - var masterInfo = sentinelClient.SentinelGetMasterAddrByName(masterName); + List masterInfo; + lock (oLock) + masterInfo = sentinelClient.SentinelGetMasterAddrByName(masterName); + return masterInfo.Count > 0 ? SanitizeMasterConfig(masterInfo) : null; @@ -106,44 +117,47 @@ private string SanitizeMasterConfig(List masterInfo) var ip = masterInfo[0]; var port = masterInfo[1]; - string aliasIp; - if (sentinel.IpAddressMap.TryGetValue(ip, out aliasIp)) + if (sentinel.IpAddressMap.TryGetValue(ip, out var aliasIp)) ip = aliasIp; - return "{0}:{1}".Fmt(ip, port); + return $"{ip}:{port}"; } internal List GetSentinelHosts(string masterName) { - return SanitizeHostsConfig(this.sentinelClient.SentinelSentinels(sentinel.MasterName)); + List> sentinelSentinels; + lock (oLock) + sentinelSentinels = this.sentinelClient.SentinelSentinels(sentinel.MasterName); + + return SanitizeHostsConfig(sentinelSentinels); } - internal List GetSlaveHosts(string masterName) + internal List GetReplicaHosts(string masterName) { - return SanitizeHostsConfig(this.sentinelClient.SentinelSlaves(sentinel.MasterName)); + List> sentinelReplicas; + + lock (oLock) + sentinelReplicas = sentinelClient.SentinelSlaves(sentinel.MasterName); + + return SanitizeHostsConfig(sentinelReplicas); } - private List SanitizeHostsConfig(IEnumerable> slaves) + private List SanitizeHostsConfig(IEnumerable> replicas) { - string ip; - string port; - string flags; - var servers = new List(); - foreach (var slave in slaves) + foreach (var replica in replicas) { - slave.TryGetValue("flags", out flags); - slave.TryGetValue("ip", out ip); - slave.TryGetValue("port", out port); + replica.TryGetValue("flags", out var flags); + replica.TryGetValue("ip", out var ip); + replica.TryGetValue("port", out var port); - string aliasIp; - if (sentinel.IpAddressMap.TryGetValue(ip, out aliasIp)) + if (sentinel.IpAddressMap.TryGetValue(ip, out var aliasIp)) ip = aliasIp; else if (ip == "127.0.0.1") ip = this.sentinelClient.Host; if (ip != null && port != null && !flags.Contains("s_down") && !flags.Contains("o_down")) - servers.Add("{0}:{1}".Fmt(ip, port)); + servers.Add($"{ip}:{port}"); } return servers; } @@ -152,27 +166,35 @@ public void BeginListeningForConfigurationChanges() { try { - if 
(this.sentinePubSub == null) + lock (oLock) { - var sentinelManager = new BasicRedisClientManager(sentinel.SentinelHosts, sentinel.SentinelHosts) + if (this.sentinelPubSub == null) { - //Use BasicRedisResolver which doesn't validate non-Master Sentinel instances - RedisResolver = new BasicRedisResolver(sentinel.SentinelEndpoints, sentinel.SentinelEndpoints) - }; - this.sentinePubSub = new RedisPubSubServer(sentinelManager) - { - HeartbeatInterval = null, - IsSentinelSubscription = true, - ChannelsMatching = new[] { RedisPubSubServer.AllChannelsWildCard }, - OnMessage = SentinelMessageReceived - }; + var currentSentinelHost = new[] {sentinelEndpoint}; + var sentinelManager = new BasicRedisClientManager(currentSentinelHost, currentSentinelHost) + { + //Use BasicRedisResolver which doesn't validate non-Master Sentinel instances + RedisResolver = new BasicRedisResolver(currentSentinelHost, currentSentinelHost) + }; + + if (Log.IsDebugEnabled) + Log.Debug($"Starting subscription to {sentinel.SentinelHosts.ToArray()}, replicas: {sentinel.SentinelHosts.ToArray()}..."); + + this.sentinelPubSub = new RedisPubSubServer(sentinelManager) + { + HeartbeatInterval = null, + IsSentinelSubscription = true, + ChannelsMatching = new[] { RedisPubSubServer.AllChannelsWildCard }, + OnMessage = SentinelMessageReceived + }; + } } - this.sentinePubSub.Start(); + + this.sentinelPubSub.Start(); } catch (Exception ex) { - Log.Error("Error Subscribing to Redis Channel on {0}:{1}" - .Fmt(this.sentinelClient.Host, this.sentinelClient.Port), ex); + Log.Error($"Error Subscribing to Redis Channel on {sentinelClient.Host}:{sentinelClient.Port}", ex); if (OnSentinelError != null) OnSentinelError(ex); @@ -181,12 +203,13 @@ public void BeginListeningForConfigurationChanges() public void ForceMasterFailover(string masterName) { - this.sentinelClient.SentinelFailover(masterName); + lock (oLock) + this.sentinelClient.SentinelFailover(masterName); } public void Dispose() { - new IDisposable[] { this.sentinelClient, sentinePubSub }.Dispose(Log); + new IDisposable[] { this.sentinelClient, sentinelPubSub }.Dispose(Log); } } } diff --git a/src/ServiceStack.Redis/RedisSubscription.Async.cs b/src/ServiceStack.Redis/RedisSubscription.Async.cs new file mode 100644 index 00000000..95a861f9 --- /dev/null +++ b/src/ServiceStack.Redis/RedisSubscription.Async.cs @@ -0,0 +1,184 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis +{ + partial class RedisSubscription + : IRedisSubscriptionAsync + { + // private events here for +/- semantics + private event Func OnSubscribeAsync; + private event Func OnMessageAsync; + private event Func OnMessageBytesAsync; + private event Func OnUnSubscribeAsync; + + event Func IRedisSubscriptionAsync.OnSubscribeAsync + { + add => OnSubscribeAsync += value; + remove => OnSubscribeAsync -= value; + } + event Func IRedisSubscriptionAsync.OnMessageAsync + { + add => OnMessageAsync += value; + remove => OnMessageAsync -= value; + } + event Func IRedisSubscriptionAsync.OnMessageBytesAsync + { + add => OnMessageBytesAsync += value; + remove => OnMessageBytesAsync -= value; + } + event Func IRedisSubscriptionAsync.OnUnSubscribeAsync + { + add => OnUnSubscribeAsync += value; + remove => OnUnSubscribeAsync -= value; + } + + private IRedisSubscriptionAsync AsAsync() => this; + private IRedisNativeClientAsync NativeAsync + { + get + { + return redisClient as IRedisNativeClientAsync ?? 
NotAsync(); + static IRedisNativeClientAsync NotAsync() => throw new InvalidOperationException("The underlying client is not async"); + } + } + + private async ValueTask UnSubscribeFromAllChannelsMatchingAnyPatternsAsync(CancellationToken token = default) + { + if (activeChannels.Count == 0) return; + + var multiBytes = await NativeAsync.PUnSubscribeAsync(Array.Empty(), token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + + this.activeChannels = new List(); + } + + ValueTask IAsyncDisposable.DisposeAsync() => IsPSubscription + ? UnSubscribeFromAllChannelsMatchingAnyPatternsAsync() + : AsAsync().UnSubscribeFromAllChannelsAsync(); + + async ValueTask IRedisSubscriptionAsync.SubscribeToChannelsAsync(string[] channels, CancellationToken token) + { + var multiBytes = await NativeAsync.SubscribeAsync(channels, token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + + while (this.SubscriptionCount > 0) + { + multiBytes = await NativeAsync.ReceiveMessagesAsync(token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + } + } + + async ValueTask IRedisSubscriptionAsync.SubscribeToChannelsMatchingAsync(string[] patterns, CancellationToken token) + { + var multiBytes = await NativeAsync.PSubscribeAsync(patterns, token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + + while (this.SubscriptionCount > 0) + { + multiBytes = await NativeAsync.ReceiveMessagesAsync(token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + } + } + + async ValueTask IRedisSubscriptionAsync.UnSubscribeFromAllChannelsAsync(CancellationToken token) + { + if (activeChannels.Count == 0) return; + + var multiBytes = await NativeAsync.UnSubscribeAsync(Array.Empty(), token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + + this.activeChannels = new List(); + } + + async ValueTask IRedisSubscriptionAsync.UnSubscribeFromChannelsAsync(string[] channels, CancellationToken token) + { + var multiBytes = await NativeAsync.UnSubscribeAsync(channels, token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + } + + async ValueTask IRedisSubscriptionAsync.UnSubscribeFromChannelsMatchingAsync(string[] patterns, CancellationToken token) + { + var multiBytes = await NativeAsync.PUnSubscribeAsync(patterns, token).ConfigureAwait(false); + await ParseSubscriptionResultsAsync(multiBytes).ConfigureAwait(false); + } + + private async ValueTask ParseSubscriptionResultsAsync(byte[][] multiBytes) + { + int componentsPerMsg = IsPSubscription ? 
4 : 3; + for (var i = 0; i < multiBytes.Length; i += componentsPerMsg) + { + var messageType = multiBytes[i]; + var channel = multiBytes[i + 1].FromUtf8Bytes(); + if (SubscribeWord.AreEqual(messageType) + || PSubscribeWord.AreEqual(messageType)) + { + IsPSubscription = PSubscribeWord.AreEqual(messageType); + + this.SubscriptionCount = int.Parse(multiBytes[i + MsgIndex].FromUtf8Bytes()); + + activeChannels.Add(channel); + + var tmp = OnSubscribeAsync; + if (tmp is object) await tmp.Invoke(channel).ConfigureAwait(false); + } + else if (UnSubscribeWord.AreEqual(messageType) + || PUnSubscribeWord.AreEqual(messageType)) + { + this.SubscriptionCount = int.Parse(multiBytes[i + 2].FromUtf8Bytes()); + + activeChannels.Remove(channel); + + var tmp = OnUnSubscribeAsync; + if (tmp is object) await tmp.Invoke(channel).ConfigureAwait(false); + } + else if (MessageWord.AreEqual(messageType)) + { + var msgBytes = multiBytes[i + MsgIndex]; + var tmp1 = OnMessageBytesAsync; + if (tmp1 is object) await tmp1.Invoke(channel, msgBytes).ConfigureAwait(false); + + var tmp2 = OnMessageAsync; + if (tmp2 is object) + { + var message = msgBytes.FromUtf8Bytes(); + await tmp2.Invoke(channel, message).ConfigureAwait(false); + } + } + else if (PMessageWord.AreEqual(messageType)) + { + channel = multiBytes[i + 2].FromUtf8Bytes(); + var msgBytes = multiBytes[i + MsgIndex + 1]; + var tmp1 = OnMessageBytesAsync; + if (tmp1 is object) await tmp1.Invoke(channel, msgBytes).ConfigureAwait(false); + + var tmp2 = OnMessageAsync; + if (tmp2 is object) + { + var message = msgBytes.FromUtf8Bytes(); + await tmp2.Invoke(channel, message).ConfigureAwait(false); + } + } + else + { + throw new RedisException( + "Invalid state. Expected [[p]subscribe|[p]unsubscribe|message] got: " + messageType.FromUtf8Bytes()); + } + } + } + + ValueTask IRedisSubscriptionAsync.SubscribeToChannelsAsync(params string[] channels) + => AsAsync().SubscribeToChannelsAsync(channels, token: default); + + ValueTask IRedisSubscriptionAsync.SubscribeToChannelsMatchingAsync(params string[] patterns) + => AsAsync().SubscribeToChannelsMatchingAsync(patterns, token: default); + + ValueTask IRedisSubscriptionAsync.UnSubscribeFromChannelsAsync(params string[] channels) + => AsAsync().UnSubscribeFromChannelsAsync(channels, token: default); + + ValueTask IRedisSubscriptionAsync.UnSubscribeFromChannelsMatchingAsync(params string[] patterns) + => AsAsync().UnSubscribeFromChannelsMatchingAsync(patterns, token: default); + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/RedisSubscription.cs b/src/ServiceStack.Redis/RedisSubscription.cs index a7b03cc7..298b1bf4 100644 --- a/src/ServiceStack.Redis/RedisSubscription.cs +++ b/src/ServiceStack.Redis/RedisSubscription.cs @@ -4,7 +4,7 @@ namespace ServiceStack.Redis { - public class RedisSubscription + public partial class RedisSubscription : IRedisSubscription { private readonly IRedisNativeClient redisClient; @@ -30,6 +30,7 @@ public RedisSubscription(IRedisNativeClient redisClient) public Action OnSubscribe { get; set; } public Action OnMessage { get; set; } + public Action OnMessageBytes { get; set; } public Action OnUnSubscribe { get; set; } public void SubscribeToChannels(params string[] channels) @@ -72,10 +73,7 @@ private void ParseSubscriptionResults(byte[][] multiBytes) activeChannels.Add(channel); - if (this.OnSubscribe != null) - { - this.OnSubscribe(channel); - } + this.OnSubscribe?.Invoke(channel); } else if (UnSubscribeWord.AreEqual(messageType) || PUnSubscribeWord.AreEqual(messageType)) @@ -84,28 
+82,24 @@ private void ParseSubscriptionResults(byte[][] multiBytes) activeChannels.Remove(channel); - if (this.OnUnSubscribe != null) - { - this.OnUnSubscribe(channel); - } + this.OnUnSubscribe?.Invoke(channel); } else if (MessageWord.AreEqual(messageType)) { - var message = multiBytes[i + MsgIndex].FromUtf8Bytes(); + var msgBytes = multiBytes[i + MsgIndex]; + this.OnMessageBytes?.Invoke(channel, msgBytes); - if (this.OnMessage != null) - { - this.OnMessage(channel, message); - } + var message = msgBytes.FromUtf8Bytes(); + this.OnMessage?.Invoke(channel, message); } else if (PMessageWord.AreEqual(messageType)) { - var message = multiBytes[i + MsgIndex + 1].FromUtf8Bytes(); channel = multiBytes[i + 2].FromUtf8Bytes(); - if (this.OnMessage != null) - { - this.OnMessage(channel, message); - } + var msgBytes = multiBytes[i + MsgIndex + 1]; + this.OnMessageBytes?.Invoke(channel, msgBytes); + + var message = msgBytes.FromUtf8Bytes(); + this.OnMessage?.Invoke(channel, message); } else { diff --git a/src/ServiceStack.Redis/ScanResult.cs b/src/ServiceStack.Redis/ScanResult.cs index dfc6958b..8a2e3248 100644 --- a/src/ServiceStack.Redis/ScanResult.cs +++ b/src/ServiceStack.Redis/ScanResult.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using System.Globalization; namespace ServiceStack.Redis { @@ -15,7 +16,9 @@ public static Dictionary AsItemsWithScores(this ScanResult resul for (var i = 0; i < result.Results.Count; i += 2) { var key = result.Results[i]; - var score = double.Parse(result.Results[i + 1].FromUtf8Bytes()); + var score = double.Parse(result.Results[i + 1].FromUtf8Bytes(), + NumberStyles.Float, + CultureInfo.InvariantCulture); to[key.FromUtf8Bytes()] = score; } return to; @@ -33,4 +36,4 @@ public static Dictionary AsKeyValues(this ScanResult result) return to; } } -} \ No newline at end of file +} diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.Core.csproj b/src/ServiceStack.Redis/ServiceStack.Redis.Core.csproj new file mode 100644 index 00000000..a2fe7181 --- /dev/null +++ b/src/ServiceStack.Redis/ServiceStack.Redis.Core.csproj @@ -0,0 +1,35 @@ + + + ServiceStack.Redis.Core + ServiceStack.Redis + ServiceStack.Redis + netstandard2.0;net6.0 + ServiceStack.Redis .NET Standard 2.0 + + .NET Standard 2.0 version of ServiceStack.Redis + + Redis;NoSQL;Client;Distributed;Cache;PubSub;Messaging;Transactions + + + $(DefineConstants);ASYNC_MEMORY + + + $(DefineConstants);ASYNC_MEMORY;NET6_0 + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.Signed.csproj b/src/ServiceStack.Redis/ServiceStack.Redis.Signed.csproj deleted file mode 100644 index 94f7b514..00000000 --- a/src/ServiceStack.Redis/ServiceStack.Redis.Signed.csproj +++ /dev/null @@ -1,304 +0,0 @@ - - - - Debug - AnyCPU - 9.0.30729 - 2.0 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540} - Library - Properties - ServiceStack.Redis - ServiceStack.Redis - 512 - - - 3.5 - - publish\ - true - Disk - false - Foreground - 7 - Days - false - false - true - 0 - 1.0.0.%2a - false - false - true - v4.5 - - - - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - AllRules.ruleset - false - - - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - AllRules.ruleset - bin\Release\ServiceStack.Redis.XML - false - - - true - bin\STATIC_ONLY NO_EXPRESSIONS\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset - false - - - true - bin\MonoTouch\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset - false - - - true - bin\x86\Debug\ - DEBUG;TRACE - 
full - x86 - prompt - AllRules.ruleset - false - false - - - TRACE - bin\Release\ServiceStack.Redis.XML - true - pdbonly - x86 - prompt - AllRules.ruleset - false - false - bin\x86\Release\ - false - - - bin\Signed\ - TRACE - bin\Release\ServiceStack.Redis.XML - true - pdbonly - AnyCPU - prompt - AllRules.ruleset - false - - - bin\x86\Signed\ - TRACE - bin\Release\ServiceStack.Redis.XML - true - pdbonly - x86 - prompt - AllRules.ruleset - false - false - false - - - false - - - false - - - true - - - servicestack-sn.pfx - - - - ..\..\lib\signed\ServiceStack.Common.dll - - - False - ..\..\lib\ServiceStack.Interfaces.dll - - - ..\..\lib\signed\ServiceStack.Text.dll - - - - 3.5 - - - - 3.5 - - - 3.5 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Code - - - - - - - - - - - - - - - - False - .NET Framework 3.5 SP1 Client Profile - false - - - False - .NET Framework 3.5 SP1 - true - - - False - Windows Installer 3.1 - true - - - - - \ No newline at end of file diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.Signed.project.json b/src/ServiceStack.Redis/ServiceStack.Redis.Signed.project.json deleted file mode 100644 index 572bc9ed..00000000 --- a/src/ServiceStack.Redis/ServiceStack.Redis.Signed.project.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "frameworks": { - "net45": {} - }, - "runtimes": { - "win": {} - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.Source.csproj b/src/ServiceStack.Redis/ServiceStack.Redis.Source.csproj new file mode 100644 index 00000000..3efa57ed --- /dev/null +++ b/src/ServiceStack.Redis/ServiceStack.Redis.Source.csproj @@ -0,0 +1,44 @@ + + + ServiceStack.Redis + ServiceStack.Redis + netstandard2.0;net6.0 + C# Redis client for the Redis NoSQL DB + + C# Redis Client for the worlds fastest distributed NoSQL datastore. + Byte[], String and POCO Typed clients. + Thread-Safe Basic and Pooled client managers included. + + Redis;NoSQL;Client;Distributed;Cache;PubSub;Messaging;Transactions + false + + + + $(DefineConstants);ASYNC_MEMORY + + + $(DefineConstants);ASYNC_MEMORY;NET6_0 + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.csproj b/src/ServiceStack.Redis/ServiceStack.Redis.csproj index ce857a9c..4e6274a9 100644 --- a/src/ServiceStack.Redis/ServiceStack.Redis.csproj +++ b/src/ServiceStack.Redis/ServiceStack.Redis.csproj @@ -1,298 +1,47 @@ - - + - Debug - AnyCPU - 9.0.30729 - 2.0 - {AF99F19B-4C04-4F58-81EF-B092F1FCC540} - Library - Properties - ServiceStack.Redis + ServiceStack.Redis ServiceStack.Redis - 512 - - - 3.5 - - publish\ - true - Disk - false - Foreground - 7 - Days - false - false - true - 0 - 1.0.0.%2a - false - false - true - v4.5 - + net472;netstandard2.0;netstandard2.1;net6.0 + C# Redis client for the Redis NoSQL DB + + C# Redis Client for the worlds fastest distributed NoSQL datastore. + Byte[], String and POCO Typed clients. + Thread-Safe Basic and Pooled client managers included. 
+ + Redis;NoSQL;Client;Distributed;Cache;PubSub;Messaging;Transactions - - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - AllRules.ruleset - false + + + $(DefineConstants);NET472 - - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - AllRules.ruleset - bin\Release\ServiceStack.Redis.XML - false + + $(DefineConstants);NETCORE - - true - bin\STATIC_ONLY NO_EXPRESSIONS\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset - false + + $(DefineConstants);ASYNC_MEMORY;NETCORE - - true - bin\MonoTouch\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset - false - - - true - bin\x86\Debug\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - false - false - - - TRACE - bin\Release\ServiceStack.Redis.XML - true - pdbonly - x86 - prompt - AllRules.ruleset - false - false - bin\x86\Release\ - false - - - bin\Signed\ - TRACE - bin\Release\ServiceStack.Redis.XML - true - pdbonly - AnyCPU - prompt - AllRules.ruleset - false - - - bin\x86\Signed\ - TRACE - bin\Release\ServiceStack.Redis.XML - true - pdbonly - x86 - prompt - AllRules.ruleset - false - false - false - - - false - - - false + + $(DefineConstants);ASYNC_MEMORY;NETCORE;NET6_0 - - ..\..\lib\ServiceStack.Common.dll - - - False - ..\..\lib\ServiceStack.Interfaces.dll - - - ..\..\lib\ServiceStack.Text.dll - - - - 3.5 - + + + - - 3.5 - - - 3.5 - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Code - - - - - - - - - - - - + + + + + + + + + - - - False - .NET Framework 3.5 SP1 Client Profile - false - - - False - .NET Framework 3.5 SP1 - true - - - False - Windows Installer 3.1 - true - + - - + \ No newline at end of file diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.csproj.user b/src/ServiceStack.Redis/ServiceStack.Redis.csproj.user deleted file mode 100644 index daa64de0..00000000 --- a/src/ServiceStack.Redis/ServiceStack.Redis.csproj.user +++ /dev/null @@ -1,14 +0,0 @@ - - - - ProjectFiles - - - - - - - en-US - false - - \ No newline at end of file diff --git a/src/ServiceStack.Redis/ServiceStack.Redis.project.json b/src/ServiceStack.Redis/ServiceStack.Redis.project.json deleted file mode 100644 index 572bc9ed..00000000 --- a/src/ServiceStack.Redis/ServiceStack.Redis.project.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "frameworks": { - "net45": {} - }, - "runtimes": { - "win": {} - } -} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientProxy.cs b/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientProxy.cs index 4466fc8b..7ee72602 100644 --- a/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientProxy.cs +++ b/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientProxy.cs @@ -1,4 +1,4 @@ -#if !NETSTANDARD1_3 +#if !(NETSTANDARD2_0 || NETSTANDARD2_1 || NET6_0) using System; using System.Reflection; using System.Runtime.Remoting.Messaging; @@ -7,7 +7,7 @@ namespace ServiceStack.Redis.Support.Diagnostic { /// - /// Dynamically proxies access to the IRedisClient providing events for before & after each method invocation + /// Dynamically proxies access to the IRedisClient providing events for before & after each method invocation /// public class TrackingRedisClientProxy : System.Runtime.Remoting.Proxies.RealProxy { diff --git a/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientsManager.cs 
b/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientsManager.cs index 7f2ff89e..ab8216bd 100644 --- a/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientsManager.cs +++ b/src/ServiceStack.Redis/Support/Diagnostic/TrackingRedisClientsManager.cs @@ -1,4 +1,4 @@ -#if !NETSTANDARD1_3 +#if !(NETSTANDARD2_0 || NETSTANDARD2_1 || NET6_0) using System; using System.Collections.Generic; using System.Diagnostics; @@ -15,7 +15,7 @@ namespace ServiceStack.Redis.Support.Diagnostic /// Tracks each IRedisClient instance allocated from the IRedisClientsManager logging when they are allocated and disposed. /// Periodically writes the allocated instances to the log for diagnostic purposes. /// - public class TrackingRedisClientsManager : IRedisClientsManager + public partial class TrackingRedisClientsManager : IRedisClientsManager { private static readonly ILog Logger = LogManager.GetLogger(typeof(TrackingRedisClientsManager)); @@ -24,12 +24,7 @@ public class TrackingRedisClientsManager : IRedisClientsManager public TrackingRedisClientsManager(IRedisClientsManager redisClientsManager) { - if (redisClientsManager == null) - { - throw new ArgumentNullException("redisClientsManager"); - } - - this.redisClientsManager = redisClientsManager; + this.redisClientsManager = redisClientsManager ?? throw new ArgumentNullException(nameof(redisClientsManager)); Logger.DebugFormat("Constructed"); var timer = new Timer(state => this.DumpState()); diff --git a/src/ServiceStack.Redis/Support/Locking/DisposableDistributedLock.cs b/src/ServiceStack.Redis/Support/Locking/DisposableDistributedLock.cs index 44c6d7d4..96c891b1 100644 --- a/src/ServiceStack.Redis/Support/Locking/DisposableDistributedLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/DisposableDistributedLock.cs @@ -8,7 +8,6 @@ namespace ServiceStack.Redis.Support.Locking public class DisposableDistributedLock : IDisposable { private readonly IDistributedLock myLock; - private readonly long lockState; private readonly long lockExpire; private readonly IRedisClient myClient; private readonly string globalLockKey; @@ -25,20 +24,12 @@ public DisposableDistributedLock(IRedisClient client, string globalLockKey, int myLock = new DistributedLock(); myClient = client; this.globalLockKey = globalLockKey; - lockState = myLock.Lock(globalLockKey, acquisitionTimeout, lockTimeout, out lockExpire, myClient); + LockState = myLock.Lock(globalLockKey, acquisitionTimeout, lockTimeout, out lockExpire, myClient); } + public long LockState { get; } - public long LockState - { - get { return lockState; } - } - - public long LockExpire - { - get { return lockExpire; } - } - + public long LockExpire => lockExpire; /// /// unlock diff --git a/src/ServiceStack.Redis/Support/Locking/DistributedLock.Async.cs b/src/ServiceStack.Redis/Support/Locking/DistributedLock.Async.cs new file mode 100644 index 00000000..4f79a42b --- /dev/null +++ b/src/ServiceStack.Redis/Support/Locking/DistributedLock.Async.cs @@ -0,0 +1,121 @@ +using System; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Support.Locking +{ + partial class DistributedLock : IDistributedLockAsync + { + public IDistributedLockAsync AsAsync() => this; + + async ValueTask IDistributedLockAsync.LockAsync(string key, int acquisitionTimeout, int lockTimeout, IRedisClientAsync client, CancellationToken token) + { + long lockExpire = 0; + + // cannot lock on a null key + if (key == null) + return new LockState(LOCK_NOT_ACQUIRED, lockExpire); + + const int 
sleepIfLockSet = 200; + acquisitionTimeout *= 1000; //convert to ms + int tryCount = (acquisitionTimeout / sleepIfLockSet) + 1; + + var ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + var newLockExpire = CalculateLockExpire(ts, lockTimeout); + + var nativeClient = (IRedisNativeClientAsync)client; + long wasSet = await nativeClient.SetNXAsync(key, BitConverter.GetBytes(newLockExpire), token).ConfigureAwait(false); + int totalTime = 0; + while (wasSet == LOCK_NOT_ACQUIRED && totalTime < acquisitionTimeout) + { + int count = 0; + while (wasSet == 0 && count < tryCount && totalTime < acquisitionTimeout) + { + await Task.Delay(sleepIfLockSet).ConfigureAwait(false); + totalTime += sleepIfLockSet; + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + newLockExpire = CalculateLockExpire(ts, lockTimeout); + wasSet = await nativeClient.SetNXAsync(key, BitConverter.GetBytes(newLockExpire), token).ConfigureAwait(false); + count++; + } + // acquired lock! + if (wasSet != LOCK_NOT_ACQUIRED) break; + + // handle possibliity of crashed client still holding the lock + var pipe = client.CreatePipeline(); + await using (pipe.ConfigureAwait(false)) + { + long lockValue = 0; + pipe.QueueCommand(r => ((IRedisNativeClientAsync)r).WatchAsync(new[] { key }, token)); + pipe.QueueCommand(r => ((IRedisNativeClientAsync)r).GetAsync(key, token), x => lockValue = (x != null) ? BitConverter.ToInt64(x, 0) : 0); + await pipe.FlushAsync(token).ConfigureAwait(false); + + // if lock value is 0 (key is empty), or expired, then we can try to acquire it + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + if (lockValue < ts.TotalSeconds) + { + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + newLockExpire = CalculateLockExpire(ts, lockTimeout); + var trans = await client.CreateTransactionAsync(token).ConfigureAwait(false); + await using (trans.ConfigureAwait(false)) + { + var expire = newLockExpire; + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).SetAsync(key, BitConverter.GetBytes(expire), token: token)); + if (await trans.CommitAsync(token).ConfigureAwait(false)) + wasSet = LOCK_RECOVERED; //recovered lock! + } + } + else + { + await nativeClient.UnWatchAsync(token).ConfigureAwait(false); + } + } + if (wasSet != LOCK_NOT_ACQUIRED) break; + await Task.Delay(sleepIfLockSet).ConfigureAwait(false); + totalTime += sleepIfLockSet; + } + if (wasSet != LOCK_NOT_ACQUIRED) + { + lockExpire = newLockExpire; + } + return new LockState(wasSet, lockExpire); + } + + async ValueTask IDistributedLockAsync.UnlockAsync(string key, long lockExpire, IRedisClientAsync client, CancellationToken token) + { + if (lockExpire <= 0) + return false; + long lockVal = 0; + var nativeClient = (IRedisNativeClientAsync)client; + var pipe = client.CreatePipeline(); + await using (pipe.ConfigureAwait(false)) + { + pipe.QueueCommand(r => ((IRedisNativeClientAsync)r).WatchAsync(new[] { key }, token)); + pipe.QueueCommand(r => ((IRedisNativeClientAsync)r).GetAsync(key, token), + x => lockVal = (x != null) ? 
BitConverter.ToInt64(x, 0) : 0); + await pipe.FlushAsync(token).ConfigureAwait(false); + } + + if (lockVal != lockExpire) + { + if (lockVal != 0) + Debug.WriteLine($"Unlock(): Failed to unlock key {key}; lock has been acquired by another client "); + else + Debug.WriteLine($"Unlock(): Failed to unlock key {key}; lock has been identifed as a zombie and harvested "); + await nativeClient.UnWatchAsync(token).ConfigureAwait(false); + return false; + } + + var trans = await client.CreateTransactionAsync(token).ConfigureAwait(false); + await using (trans.ConfigureAwait(false)) + { + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).DelAsync(key, token)); + var rc = await trans.CommitAsync(token).ConfigureAwait(false); + if (!rc) + Debug.WriteLine($"Unlock(): Failed to delete key {key}; lock has been acquired by another client "); + return rc; + } + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/DistributedLock.cs b/src/ServiceStack.Redis/Support/Locking/DistributedLock.cs index 8e2d9660..84d1be68 100644 --- a/src/ServiceStack.Redis/Support/Locking/DistributedLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/DistributedLock.cs @@ -3,132 +3,128 @@ namespace ServiceStack.Redis.Support.Locking { - public class DistributedLock : IDistributedLock - { + public partial class DistributedLock : IDistributedLock + { public const int LOCK_NOT_ACQUIRED = 0; public const int LOCK_ACQUIRED = 1; public const int LOCK_RECOVERED = 2; - /// - /// acquire distributed, non-reentrant lock on key - /// - /// global key for this lock - /// timeout for acquiring lock - /// timeout for lock, in seconds (stored as value against lock key) + /// + /// acquire distributed, non-reentrant lock on key + /// + /// global key for this lock + /// timeout for acquiring lock + /// timeout for lock, in seconds (stored as value against lock key) /// /// public virtual long Lock(string key, int acquisitionTimeout, int lockTimeout, out long lockExpire, IRedisClient client) - { - lockExpire = 0; + { + lockExpire = 0; // cannot lock on a null key if (key == null) return LOCK_NOT_ACQUIRED; - const int sleepIfLockSet = 200; - acquisitionTimeout *= 1000; //convert to ms - int tryCount = (acquisitionTimeout / sleepIfLockSet) + 1; + const int sleepIfLockSet = 200; + acquisitionTimeout *= 1000; //convert to ms + int tryCount = (acquisitionTimeout / sleepIfLockSet) + 1; - var ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); - var newLockExpire = CalculateLockExpire(ts, lockTimeout); + var ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + var newLockExpire = CalculateLockExpire(ts, lockTimeout); var localClient = (RedisClient)client; long wasSet = localClient.SetNX(key, BitConverter.GetBytes(newLockExpire)); - int totalTime = 0; + int totalTime = 0; while (wasSet == LOCK_NOT_ACQUIRED && totalTime < acquisitionTimeout) - { - int count = 0; - while (wasSet == 0 && count < tryCount && totalTime < acquisitionTimeout) - { - TaskUtils.Sleep(sleepIfLockSet); - totalTime += sleepIfLockSet; - ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); - newLockExpire = CalculateLockExpire(ts, lockTimeout); + { + int count = 0; + while (wasSet == 0 && count < tryCount && totalTime < acquisitionTimeout) + { + TaskUtils.Sleep(sleepIfLockSet); + totalTime += sleepIfLockSet; + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + newLockExpire = CalculateLockExpire(ts, lockTimeout); wasSet = localClient.SetNX(key, BitConverter.GetBytes(newLockExpire)); - count++; - } - // acquired 
lock! + count++; + } + // acquired lock! if (wasSet != LOCK_NOT_ACQUIRED) break; - // handle possibliity of crashed client still holding the lock + // handle possibliity of crashed client still holding the lock using (var pipe = localClient.CreatePipeline()) - { - long lockValue=0; - pipe.QueueCommand(r => ((RedisNativeClient)r).Watch(key)); - pipe.QueueCommand(r => ((RedisNativeClient)r).Get(key), x => lockValue = (x != null) ? BitConverter.ToInt64(x,0) : 0); - pipe.Flush(); + { + long lockValue = 0; + pipe.QueueCommand(r => ((RedisNativeClient)r).Watch(key)); + pipe.QueueCommand(r => ((RedisNativeClient)r).Get(key), x => lockValue = (x != null) ? BitConverter.ToInt64(x, 0) : 0); + pipe.Flush(); - // if lock value is 0 (key is empty), or expired, then we can try to acquire it + // if lock value is 0 (key is empty), or expired, then we can try to acquire it ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); - if (lockValue < ts.TotalSeconds) - { - ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); - newLockExpire = CalculateLockExpire(ts, lockTimeout); - using (var trans = localClient.CreateTransaction()) - { - var expire = newLockExpire; - trans.QueueCommand(r => ((RedisNativeClient)r).Set(key, BitConverter.GetBytes(expire))); - if (trans.Commit()) - wasSet = LOCK_RECOVERED; //recovered lock! - } - } - else - { + if (lockValue < ts.TotalSeconds) + { + ts = (DateTime.UtcNow - new DateTime(1970, 1, 1, 0, 0, 0)); + newLockExpire = CalculateLockExpire(ts, lockTimeout); + using (var trans = localClient.CreateTransaction()) + { + var expire = newLockExpire; + trans.QueueCommand(r => ((RedisNativeClient)r).Set(key, BitConverter.GetBytes(expire))); + if (trans.Commit()) + wasSet = LOCK_RECOVERED; //recovered lock! + } + } + else + { localClient.UnWatch(); - } - } + } + } if (wasSet != LOCK_NOT_ACQUIRED) break; - TaskUtils.Sleep(sleepIfLockSet); - totalTime += sleepIfLockSet; - } + TaskUtils.Sleep(sleepIfLockSet); + totalTime += sleepIfLockSet; + } if (wasSet != LOCK_NOT_ACQUIRED) { lockExpire = newLockExpire; } - return wasSet; - - } + return wasSet; + } - - /// - /// unlock key - /// - public virtual bool Unlock(string key, long lockExpire, IRedisClient client) - { - if (lockExpire <= 0) - return false; - long lockVal = 0; + /// + /// unlock key + /// + public virtual bool Unlock(string key, long lockExpire, IRedisClient client) + { + if (lockExpire <= 0) + return false; + long lockVal = 0; var localClient = (RedisClient)client; using (var pipe = localClient.CreatePipeline()) { - - pipe.QueueCommand(r => ((RedisNativeClient) r).Watch(key)); - pipe.QueueCommand(r => ((RedisNativeClient) r).Get(key), + + pipe.QueueCommand(r => ((RedisNativeClient)r).Watch(key)); + pipe.QueueCommand(r => ((RedisNativeClient)r).Get(key), x => lockVal = (x != null) ? 
BitConverter.ToInt64(x, 0) : 0); pipe.Flush(); } - if (lockVal != lockExpire) - { + if (lockVal != lockExpire) + { if (lockVal != 0) - Debug.WriteLine(String.Format("Unlock(): Failed to unlock key {0}; lock has been acquired by another client ", key)); + Debug.WriteLine($"Unlock(): Failed to unlock key {key}; lock has been acquired by another client "); else - Debug.WriteLine(String.Format("Unlock(): Failed to unlock key {0}; lock has been identifed as a zombie and harvested ", key)); + Debug.WriteLine($"Unlock(): Failed to unlock key {key}; lock has been identifed as a zombie and harvested "); localClient.UnWatch(); - return false; - } + return false; + } using (var trans = localClient.CreateTransaction()) { trans.QueueCommand(r => ((RedisNativeClient)r).Del(key)); - var rc = trans.Commit(); + var rc = trans.Commit(); if (!rc) - Debug.WriteLine(String.Format("Unlock(): Failed to delete key {0}; lock has been acquired by another client ", key)); + Debug.WriteLine($"Unlock(): Failed to delete key {key}; lock has been acquired by another client "); return rc; } - - } - + } /// /// @@ -140,6 +136,5 @@ private static long CalculateLockExpire(TimeSpan ts, int timeout) { return (long)(ts.TotalSeconds + timeout + 1.5); } - - } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/IDistributedLock.Async.cs b/src/ServiceStack.Redis/Support/Locking/IDistributedLock.Async.cs new file mode 100644 index 00000000..7b548b82 --- /dev/null +++ b/src/ServiceStack.Redis/Support/Locking/IDistributedLock.Async.cs @@ -0,0 +1,36 @@ +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Support.Locking +{ + /// + /// Distributed lock interface + /// + public interface IDistributedLockAsync + { + // note: can't use "out" with async, so return LockState instead + ValueTask LockAsync(string key, int acquisitionTimeout, int lockTimeout, IRedisClientAsync client, CancellationToken token = default); + ValueTask UnlockAsync(string key, long lockExpire, IRedisClientAsync client, CancellationToken token = default); + } + + public readonly struct LockState + { + public long Result { get; } // kinda feels like this should be an enum; leaving alone for API parity (sync vs async) + public long Expiration { get; } + public LockState(long result, long expiration) + { + Result = result; + Expiration = expiration; + } + public override bool Equals(object obj) => throw new NotSupportedException(); + public override int GetHashCode() => throw new NotSupportedException(); + public override string ToString() => nameof(LockState); + + public void Deconstruct(out long result, out long expiration) + { + result = Result; + expiration = Expiration; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/IDistributedLock.cs b/src/ServiceStack.Redis/Support/Locking/IDistributedLock.cs index 4d331bcc..2f1fc405 100644 --- a/src/ServiceStack.Redis/Support/Locking/IDistributedLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/IDistributedLock.cs @@ -4,8 +4,8 @@ /// Distributed lock interface /// public interface IDistributedLock - { - long Lock(string key, int acquisitionTimeout, int lockTimeout, out long lockExpire, IRedisClient client); - bool Unlock(string key, long lockExpire, IRedisClient client); - } + { + long Lock(string key, int acquisitionTimeout, int lockTimeout, out long lockExpire, IRedisClient client); + bool Unlock(string key, long lockExpire, IRedisClient client); + } } \ No newline at end of file diff --git 
a/src/ServiceStack.Redis/Support/Locking/ILockingStrategy.cs b/src/ServiceStack.Redis/Support/Locking/ILockingStrategy.cs index 94c97f3f..6a60aab6 100644 --- a/src/ServiceStack.Redis/Support/Locking/ILockingStrategy.cs +++ b/src/ServiceStack.Redis/Support/Locking/ILockingStrategy.cs @@ -6,9 +6,9 @@ namespace ServiceStack.Redis.Support.Locking /// Locking strategy interface /// public interface ILockingStrategy - { - IDisposable ReadLock(); + { + IDisposable ReadLock(); - IDisposable WriteLock(); - } + IDisposable WriteLock(); + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/NoLockingStrategy.cs b/src/ServiceStack.Redis/Support/Locking/NoLockingStrategy.cs index 9f6c30b6..70a92057 100644 --- a/src/ServiceStack.Redis/Support/Locking/NoLockingStrategy.cs +++ b/src/ServiceStack.Redis/Support/Locking/NoLockingStrategy.cs @@ -2,16 +2,16 @@ namespace ServiceStack.Redis.Support.Locking { - public class NoLockingStrategy : ILockingStrategy - { - public IDisposable ReadLock() - { - return null; - } + public class NoLockingStrategy : ILockingStrategy + { + public IDisposable ReadLock() + { + return null; + } - public IDisposable WriteLock() - { - return null; - } - } + public IDisposable WriteLock() + { + return null; + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/ReadLock.cs b/src/ServiceStack.Redis/Support/Locking/ReadLock.cs index 847054b5..7cbb7db2 100644 --- a/src/ServiceStack.Redis/Support/Locking/ReadLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/ReadLock.cs @@ -3,30 +3,30 @@ namespace ServiceStack.Redis.Support.Locking { - /// - /// This class manages a read lock for a local readers/writer lock, - /// using the Resource Acquisition Is Initialization pattern - /// - public class ReadLock : IDisposable - { - private readonly ReaderWriterLockSlim lockObject; + /// + /// This class manages a read lock for a local readers/writer lock, + /// using the Resource Acquisition Is Initialization pattern + /// + public class ReadLock : IDisposable + { + private readonly ReaderWriterLockSlim lockObject; - /// - /// RAII initialization - /// - /// - public ReadLock(ReaderWriterLockSlim lockObject) - { - this.lockObject = lockObject; - lockObject.EnterReadLock(); - } + /// + /// RAII initialization + /// + /// + public ReadLock(ReaderWriterLockSlim lockObject) + { + this.lockObject = lockObject; + lockObject.EnterReadLock(); + } - /// - /// RAII disposal - /// - public void Dispose() - { - lockObject.ExitReadLock(); - } - } + /// + /// RAII disposal + /// + public void Dispose() + { + lockObject.ExitReadLock(); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/ReaderWriterLockingStrategy.cs b/src/ServiceStack.Redis/Support/Locking/ReaderWriterLockingStrategy.cs index f3fba6cc..ba0e8763 100644 --- a/src/ServiceStack.Redis/Support/Locking/ReaderWriterLockingStrategy.cs +++ b/src/ServiceStack.Redis/Support/Locking/ReaderWriterLockingStrategy.cs @@ -3,19 +3,18 @@ namespace ServiceStack.Redis.Support.Locking { - public class ReaderWriterLockingStrategy : ILockingStrategy - { - private readonly ReaderWriterLockSlim lockObject = new ReaderWriterLockSlim(); + public class ReaderWriterLockingStrategy : ILockingStrategy + { + private readonly ReaderWriterLockSlim lockObject = new ReaderWriterLockSlim(); + public IDisposable ReadLock() + { + return new ReadLock(lockObject); + } - public IDisposable ReadLock() - { - return new ReadLock(lockObject); - } - - public IDisposable WriteLock() - { - 
return new WriteLock(lockObject); - } - } + public IDisposable WriteLock() + { + return new WriteLock(lockObject); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/Locking/WriteLock.cs b/src/ServiceStack.Redis/Support/Locking/WriteLock.cs index 7de69462..3250dd8f 100644 --- a/src/ServiceStack.Redis/Support/Locking/WriteLock.cs +++ b/src/ServiceStack.Redis/Support/Locking/WriteLock.cs @@ -3,27 +3,27 @@ namespace ServiceStack.Redis.Support.Locking { - public class WriteLock : IDisposable - { - private readonly ReaderWriterLockSlim lockObject; + public class WriteLock : IDisposable + { + private readonly ReaderWriterLockSlim lockObject; - /// - /// This class manages a write lock for a local readers/writer lock, - /// using the Resource Acquisition Is Initialization pattern - /// - /// - public WriteLock(ReaderWriterLockSlim lockObject) - { - this.lockObject = lockObject; - lockObject.EnterWriteLock(); - } + /// + /// This class manages a write lock for a local readers/writer lock, + /// using the Resource Acquisition Is Initialization pattern + /// + /// + public WriteLock(ReaderWriterLockSlim lockObject) + { + this.lockObject = lockObject; + lockObject.EnterWriteLock(); + } - /// - /// RAII disposal - /// - public void Dispose() - { - lockObject.ExitWriteLock(); - } - } + /// + /// RAII disposal + /// + public void Dispose() + { + lockObject.ExitWriteLock(); + } + } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/Support/ObjectSerializer.cs b/src/ServiceStack.Redis/Support/ObjectSerializer.cs index 0d5df45b..5c756903 100644 --- a/src/ServiceStack.Redis/Support/ObjectSerializer.cs +++ b/src/ServiceStack.Redis/Support/ObjectSerializer.cs @@ -1,5 +1,5 @@ using System.IO; -#if !NETSTANDARD1_3 +#if !NETCORE using System.Runtime.Serialization.Formatters.Binary; #endif @@ -11,7 +11,7 @@ namespace ServiceStack.Redis.Support /// public class ObjectSerializer : ISerializer { -#if !NETSTANDARD1_3 +#if !NETCORE protected readonly BinaryFormatter bf = new BinaryFormatter(); #endif @@ -23,7 +23,7 @@ public class ObjectSerializer : ISerializer /// public virtual byte[] Serialize(object value) { -#if NETSTANDARD1_3 +#if NETCORE return null; #else if (value == null) @@ -42,7 +42,7 @@ public virtual byte[] Serialize(object value) /// public virtual object Deserialize(byte[] someBytes) { -#if NETSTANDARD1_3 +#if NETCORE return null; #else if (someBytes == null) diff --git a/src/ServiceStack.Redis/Support/OptimizedObjectSerializer.cs b/src/ServiceStack.Redis/Support/OptimizedObjectSerializer.cs index 95c60dc1..3d4dbf17 100644 --- a/src/ServiceStack.Redis/Support/OptimizedObjectSerializer.cs +++ b/src/ServiceStack.Redis/Support/OptimizedObjectSerializer.cs @@ -122,7 +122,7 @@ SerializedObjectWrapper SerializeToWrapper(object value) break; default: -#if NETSTANDARD1_3 +#if NETCORE data = new byte[0]; length = 0; #else @@ -231,7 +231,7 @@ object Unwrap(SerializedObjectWrapper item) case TypeCode.Object: using (var ms = new MemoryStream(data, offset, count)) { -#if NETSTANDARD1_3 +#if NETCORE return null; #else return bf.Deserialize(ms); diff --git a/src/ServiceStack.Redis/Support/OrderedDictionary.cs b/src/ServiceStack.Redis/Support/OrderedDictionary.cs index ba296d77..636921d5 100644 --- a/src/ServiceStack.Redis/Support/OrderedDictionary.cs +++ b/src/ServiceStack.Redis/Support/OrderedDictionary.cs @@ -16,7 +16,7 @@ public class OrderedDictionary : IOrderedDictionary private static readonly string KeyTypeName = typeof(TKey).FullName; private static readonly 
string ValueTypeName = typeof(TValue).FullName; - private static readonly bool ValueTypeIsReferenceType = !typeof(ValueType).AssignableFrom(typeof(TValue)); + private static readonly bool ValueTypeIsReferenceType = !typeof(ValueType).IsAssignableFrom(typeof(TValue)); private Dictionary dictionary; private List> list; private readonly IEqualityComparer comparer; diff --git a/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClient.cs b/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClient.cs index 2217e173..e53c28cf 100644 --- a/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClient.cs +++ b/src/ServiceStack.Redis/Support/Queue/Implementation/SerializingRedisClient.cs @@ -61,10 +61,10 @@ public object Deserialize(byte[] someBytes) return serializer.Deserialize(someBytes); } + /// /// deserialize an array of byte arrays /// /// - /// public IList Deserialize(byte[][] byteArray) { IList rc = new ArrayList(); diff --git a/src/ServiceStack.Redis/Support/SerializedObjectWrapper.cs b/src/ServiceStack.Redis/Support/SerializedObjectWrapper.cs index fce2ef8e..0f06bed2 100644 --- a/src/ServiceStack.Redis/Support/SerializedObjectWrapper.cs +++ b/src/ServiceStack.Redis/Support/SerializedObjectWrapper.cs @@ -2,13 +2,11 @@ namespace ServiceStack.Redis.Support { - /// - /// wraps a serialized representation of an object - /// - /// -#if !NETSTANDARD1_3 - [Serializable] -#endif + /// + /// wraps a serialized representation of an object + /// + /// + [Serializable] public struct SerializedObjectWrapper { private ArraySegment data; @@ -30,8 +28,8 @@ public SerializedObjectWrapper(ushort flags, ArraySegment data) /// public ArraySegment Data { - get { return data; } - set { data = value; } + get => data; + set => data = value; } /// @@ -39,8 +37,8 @@ public ArraySegment Data /// public ushort Flags { - get { return flags; } - set { flags = value; } + get => flags; + set => flags = value; } } } diff --git a/src/ServiceStack.Redis/Transaction/RedisTransaction.Async.cs b/src/ServiceStack.Redis/Transaction/RedisTransaction.Async.cs new file mode 100644 index 00000000..43ca79c8 --- /dev/null +++ b/src/ServiceStack.Redis/Transaction/RedisTransaction.Async.cs @@ -0,0 +1,129 @@ +// +// https://github.com/ServiceStack/ServiceStack.Redis +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. +// +// Licensed under the same terms of ServiceStack. +// + +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Redis.Pipeline; + +namespace ServiceStack.Redis +{ + /// + /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). 
+ /// + public partial class RedisTransaction + : IRedisTransactionAsync, IRedisQueueCompletableOperationAsync + { + /// + /// Issue exec command (not queued) + /// + private async ValueTask ExecAsync(CancellationToken token) + { + RedisClient.Exec(); + await RedisClient.FlushSendBufferAsync(token).ConfigureAwait(false); + RedisClient.ResetSendBuffer(); + } + + + /// + /// Put "QUEUED" messages at back of queue + /// + partial void QueueExpectQueuedAsync() + { + QueuedCommands.Insert(0, new QueuedRedisOperation + { + }.WithAsyncReadCommand(RedisClient.ExpectQueuedAsync)); + } + + async ValueTask IRedisTransactionAsync.CommitAsync(CancellationToken token) + { + bool rc = true; + try + { + numCommands = QueuedCommands.Count / 2; + + //insert multi command at beginning + QueuedCommands.Insert(0, new QueuedRedisCommand + { + }.WithAsyncReturnCommand(VoidReturnCommandAsync: r => { Init(); return default; }) + .WithAsyncReadCommand(RedisClient.ExpectOkAsync)); + + //the first half of the responses will be "QUEUED", + // so insert reading of multiline after these responses + QueuedCommands.Insert(numCommands + 1, new QueuedRedisOperation + { + OnSuccessIntCallback = handleMultiDataResultCount + }.WithAsyncReadCommand(RedisClient.ReadMultiDataResultCountAsync)); + + // add Exec command at end (not queued) + QueuedCommands.Add(new RedisCommand + { + }.WithAsyncReturnCommand(r => ExecAsync(token))); + + //execute transaction + await ExecAsync(token).ConfigureAwait(false); + + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + } + catch (RedisTransactionFailedException) + { + rc = false; + } + finally + { + RedisClient.Transaction = null; + ClosePipeline(); + await RedisClient.AddTypeIdsRegisteredDuringPipelineAsync(token).ConfigureAwait(false); + } + return rc; + } + + ValueTask IRedisTransactionAsync.RollbackAsync(CancellationToken token) + { + Rollback(); // not currently anything different to do on the async path + return default; + } + // note: this also means that Dispose doesn't need to be complex; if Rollback needed + // splitting, we would need to override DisposeAsync and split the code, too + + + private protected override async ValueTask ReplayAsync(CancellationToken token) + { + bool rc = true; + try + { + await ExecuteAsync().ConfigureAwait(false); + + //receive expected results + foreach (var queuedCommand in QueuedCommands) + { + await queuedCommand.ProcessResultAsync(token).ConfigureAwait(false); + } + } + catch (RedisTransactionFailedException) + { + rc = false; + } + finally + { + RedisClient.Transaction = null; + ClosePipeline(); + await RedisClient.AddTypeIdsRegisteredDuringPipelineAsync(token).ConfigureAwait(false); + } + return rc; + } + } +} \ No newline at end of file diff --git a/src/ServiceStack.Redis/Transaction/RedisTransaction.cs b/src/ServiceStack.Redis/Transaction/RedisTransaction.cs index 608f27c3..db2c36b2 100644 --- a/src/ServiceStack.Redis/Transaction/RedisTransaction.cs +++ b/src/ServiceStack.Redis/Transaction/RedisTransaction.cs @@ -5,7 +5,7 @@ // Authors: // Demis Bellot (demis.bellot@gmail.com) // -// Copyright 2013 Service Stack LLC. All Rights Reserved. +// Copyright 2013 ServiceStack, Inc. All Rights Reserved. // // Licensed under the same terms of ServiceStack. // @@ -18,12 +18,20 @@ namespace ServiceStack.Redis /// /// Adds support for Redis Transactions (i.e. MULTI/EXEC/DISCARD operations). 
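Both transaction classes queue commands locally, expect a QUEUED reply per command, and flush everything as MULTI/EXEC at commit time. For orientation, here is a minimal sketch of the public usage shape (CreateTransaction/QueueCommand/Commit, the same shape the Unlock code earlier in this diff relies on); the keys and callback are illustrative:

using System;
using ServiceStack.Redis;

public static class TransactionUsageSketch
{
    public static bool LogVisit(IRedisClient redis, string userId)
    {
        using (var trans = redis.CreateTransaction()) // MULTI
        {
            // Commands are queued locally and only sent when Commit() flushes the pipeline.
            trans.QueueCommand(r => r.IncrementValue("visits:total"),
                count => Console.WriteLine($"total visits: {count}"));
            trans.QueueCommand(r => r.AddItemToList("visits:" + userId, DateTime.UtcNow.ToString("o")));

            return trans.Commit(); // EXEC; returns false if the transaction failed
        }
    }
}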
/// - public class RedisTransaction + public partial class RedisTransaction : RedisAllPurposePipeline, IRedisTransaction, IRedisQueueCompletableOperation { private int numCommands = 0; public RedisTransaction(RedisClient redisClient) - : base(redisClient) {} + : this(redisClient, false) {} + + internal RedisTransaction(RedisClient redisClient, bool isAsync) + : base(redisClient) + { + // if someone casts between sync/async: the sync-over-async or + // async-over-sync is entirely self-inflicted; I can't fix stupid + _isAsync = isAsync; + } protected override void Init() { @@ -128,7 +136,7 @@ public void Rollback() RedisClient.ClearTypeIdsRegisteredDuringPipeline(); } - public bool Replay() + public override bool Replay() { bool rc = true; try @@ -141,7 +149,7 @@ public bool Replay() queuedCommand.ProcessResult(); } } - catch (RedisTransactionFailedException e) + catch (RedisTransactionFailedException) { rc = false; } @@ -154,17 +162,26 @@ public bool Replay() return rc; } - public void Dispose() + public override void Dispose() { base.Dispose(); if (RedisClient.Transaction == null) return; Rollback(); } + private readonly bool _isAsync; protected override void AddCurrentQueuedOperation() { base.AddCurrentQueuedOperation(); - QueueExpectQueued(); + if (_isAsync) + { + QueueExpectQueuedAsync(); + } + else + { + QueueExpectQueued(); + } } + partial void QueueExpectQueuedAsync(); } } \ No newline at end of file diff --git a/src/ServiceStack.Redis/ValueTask_Utils.Async.cs b/src/ServiceStack.Redis/ValueTask_Utils.Async.cs new file mode 100644 index 00000000..87f6c917 --- /dev/null +++ b/src/ServiceStack.Redis/ValueTask_Utils.Async.cs @@ -0,0 +1,166 @@ +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Internal +{ + internal static class ValueTask_Utils + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask Await(this ValueTask pending) + { + if (pending.IsCompletedSuccessfully) + { + _ = pending.Result; // for IValueTaskSource reasons + return default; + } + else + { + return Awaited(pending); + } + static async ValueTask Awaited(ValueTask pending) + => await pending.ConfigureAwait(false); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask Await(this ValueTask pending, Func projection) + { + return pending.IsCompletedSuccessfully ? projection(pending.Result).AsValueTaskResult() : Awaited(pending, projection); + static async ValueTask Awaited(ValueTask pending, Func projection) + => projection(await pending.ConfigureAwait(false)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask AsInt32(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? (checked((int)pending.Result)).AsValueTaskResult() : Awaited(pending); + static async ValueTask Awaited(ValueTask pending) + => checked((int)await pending.ConfigureAwait(false)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask Await(this ValueTask pending, Func projection, TState state) + { + return pending.IsCompletedSuccessfully ? 
projection(pending.Result, state).AsValueTaskResult() : Awaited(pending, projection, state); + static async ValueTask Awaited(ValueTask pending, Func projection, TState state) + => projection(await pending.ConfigureAwait(false), state); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask AwaitAsTrue(this ValueTask pending) + { + if (pending.IsCompletedSuccessfully) + { + pending.GetAwaiter().GetResult(); // for IValueTaskSource reasons + return s_ValueTaskTrue; + } + else + { + return Awaited(pending); + } + static async ValueTask Awaited(ValueTask pending) + { + await pending.ConfigureAwait(false); + return true; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static Task AwaitAsTrueTask(this ValueTask pending) + { + if (pending.IsCompletedSuccessfully) + { + pending.GetAwaiter().GetResult(); // for IValueTaskSource reasons + return s_TaskTrue; + } + else + { + return Awaited(pending); + } + static async Task Awaited(ValueTask pending) + { + await pending.ConfigureAwait(false); + return true; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask AwaitAsTrue(this ValueTask pending) + { + if (pending.IsCompletedSuccessfully) + { + _ = pending.Result; // for IValueTaskSource reasons + return s_ValueTaskTrue; + } + else + { + return Awaited(pending); + } + static async ValueTask Awaited(ValueTask pending) + { + await pending.ConfigureAwait(false); + return true; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask IsSuccessAsync(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? (pending.Result == RedisNativeClient.Success).AsValueTaskResult() : Awaited(pending); + static async ValueTask Awaited(ValueTask pending) + => (await pending.ConfigureAwait(false)) == RedisNativeClient.Success; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static Task IsSuccessTaskAsync(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? (pending.Result == RedisNativeClient.Success ? s_TaskTrue : s_TaskFalse) : Awaited(pending); + static async Task Awaited(ValueTask pending) + => (await pending.ConfigureAwait(false)) == RedisNativeClient.Success; + } + + static readonly Task s_TaskTrue = Task.FromResult(true), s_TaskFalse = Task.FromResult(false); + + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask> ConvertEachToAsync(this ValueTask> pending) + { + return pending.IsCompletedSuccessfully ? pending.Result.ConvertEachTo().AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask> pending) + => (await pending.ConfigureAwait(false)).ConvertEachTo(); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask> ToStringListAsync(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? pending.Result.ToStringList().AsValueTaskResult() : Awaited(pending); + static async ValueTask> Awaited(ValueTask pending) + => (await pending.ConfigureAwait(false)).ToStringList(); + } + + private static readonly ValueTask s_ValueTaskTrue = true.AsValueTaskResult(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask Await(this ValueTask pending, T result) + { + return pending.IsCompletedSuccessfully ? 
result.AsValueTaskResult() : Awaited(pending, result); + static async ValueTask Awaited(ValueTask pending, T result) + { + await pending.ConfigureAwait(false); + return result; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask AsValueTaskResult(this T value) => new ValueTask(value); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static ValueTask FromUtf8BytesAsync(this ValueTask pending) + { + return pending.IsCompletedSuccessfully ? pending.Result.FromUtf8Bytes().AsValueTaskResult() : Awaited(pending); + static async ValueTask Awaited(ValueTask pending) + => (await pending.ConfigureAwait(false)).FromUtf8Bytes(); + } + } +} diff --git a/src/ServiceStack.Redis/project.json b/src/ServiceStack.Redis/project.json deleted file mode 100644 index f9b63d1a..00000000 --- a/src/ServiceStack.Redis/project.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "configurations": { - "Debug": { - "buildOptions": { - "define": ["DEBUG", "TRACE"] - } - }, - "Release": { - "buildOptions": { - "define": ["TRACE"], - "optimize": true - } - } - }, - "dependencies": { - "NETStandard.Library": "1.6.0", - "ServiceStack.Interfaces" : "1.0.*", - "ServiceStack.Text" : "1.0.*", - "ServiceStack.Common" : "1.0.*" - }, - "frameworks": { - "netstandard1.3": { - "dependencies" : { - "System.Net.Security" : "4.0.0", - "System.Collections.Specialized": "4.0.1", - "System.Collections.NonGeneric": "4.0.1", - "System.Net.NameResolution" : "4.0.0" - } - } - - }, - "version": "1.0.0" -} \ No newline at end of file diff --git a/src/ServiceStack.Text/project.json b/src/ServiceStack.Text/project.json deleted file mode 100644 index 055e8a18..00000000 --- a/src/ServiceStack.Text/project.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "configurations": { - "Debug": { - "buildOptions": { - "define": [ - "DEBUG", - "TRACE" - ] - } - }, - "Release": { - "buildOptions": { - "define": [ - "TRACE" - ], - "optimize": true - } - } - }, - "dependencies": { - "NETStandard.Library": "1.6.0" - }, - "frameworks": { - "netstandard1.1": { - "dependencies": {}, - "bin": { - "assembly": "../../lib/netcore/ServiceStack.Text.dll", - "pdb": "../../lib/netcore/ServiceStack.Text.pdb" - } - } - }, - "version": "1.0.0" -} \ No newline at end of file diff --git a/src/StackExchangeTester/App.config b/src/StackExchangeTester/App.config new file mode 100644 index 00000000..731f6de6 --- /dev/null +++ b/src/StackExchangeTester/App.config @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/src/StackExchangeTester/Program.cs b/src/StackExchangeTester/Program.cs new file mode 100644 index 00000000..def9166c --- /dev/null +++ b/src/StackExchangeTester/Program.cs @@ -0,0 +1,22 @@ +using ServiceStack.Redis; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace StackExchangeTester +{ + class Program + { + static void Main(string[] args) + { + var x = new RedisManagerPool("MQHnkdl402DScXzhZIxHwDaA7s8nziy45okp84ykShA=@tls-11.redis.cache.windows.net:6380?ssl=true&sslprotocols=Tls11"); + var y = x.GetClient(); + y.Ping(); + y.Set("keyServiceStackSllChangesIStillHave512mb", "value"); + y.Dispose(); + x.Dispose(); + } + } +} diff --git a/src/StackExchangeTester/StackExchangeTester.csproj b/src/StackExchangeTester/StackExchangeTester.csproj new file mode 100644 index 00000000..83e8e98a --- /dev/null +++ b/src/StackExchangeTester/StackExchangeTester.csproj @@ -0,0 +1,85 @@ + + + + + Debug + AnyCPU + {0214D0F0-EA41-4593-B558-71F974CF7C62} + Exe 
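Looking back at the ValueTask_Utils helpers added above: each one checks IsCompletedSuccessfully to take a synchronous fast path and only falls back to an async state machine when the operation is genuinely pending. A standalone, hedged illustration of that pattern with made-up names:

using System.Threading.Tasks;

internal static class ValueTaskFastPathSketch
{
    // Projects a pending ValueTask<string> without allocating an async state machine
    // when the result is already available.
    internal static ValueTask<string> ToUpperAsync(ValueTask<string> pending)
    {
        if (pending.IsCompletedSuccessfully)
            return new ValueTask<string>(pending.Result.ToUpperInvariant());

        return Awaited(pending);

        static async ValueTask<string> Awaited(ValueTask<string> value)
            => (await value.ConfigureAwait(false)).ToUpperInvariant();
    }
}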
+ StackExchangeTester + StackExchangeTester + v4.6.1 + 512 + true + true + + + AnyCPU + true + full + false + bin\Debug\ + DEBUG;TRACE + prompt + 4 + + + AnyCPU + pdbonly + true + bin\Release\ + TRACE + prompt + 4 + + + + + + + + + + + + + + + + + + + + + {8368c965-b4f6-4263-9abb-731a175b2e77} + Console.Tests + + + {91c55091-a946-49b5-9517-8794ebcc5784} + ServiceStack.Redis.Tests.Sentinel + + + {951d28ee-5d22-4c62-ac0f-1661a8ceec5a} + ServiceStack.Redis.Tests + + + {af99f19b-4c04-4f58-81ef-b092f1fcc540} + ServiceStack.Redis + + + + + {EDE973DE-4C9A-11DE-A33F-06DC55D89593} + 1 + 0 + 0 + tlbimp + False + True + + + + + + + \ No newline at end of file diff --git a/src/TestMqHost/TestMqHost.csproj b/src/TestMqHost/TestMqHost.csproj index fd81754c..14f23ae4 100644 --- a/src/TestMqHost/TestMqHost.csproj +++ b/src/TestMqHost/TestMqHost.csproj @@ -33,17 +33,17 @@ 4 - + False - ..\..\lib\ServiceStack.Common.dll + ..\..\lib\net45\ServiceStack.Common.dll - + False - ..\..\lib\ServiceStack.Interfaces.dll + ..\..\lib\net45\ServiceStack.Interfaces.dll - + False - ..\..\lib\ServiceStack.Text.dll + ..\..\lib\net45\ServiceStack.Text.dll diff --git a/src/packages/NUnit.2.6.3/NUnit.2.6.3.nupkg b/src/packages/NUnit.2.6.3/NUnit.2.6.3.nupkg deleted file mode 100644 index 61e3a5ec..00000000 Binary files a/src/packages/NUnit.2.6.3/NUnit.2.6.3.nupkg and /dev/null differ diff --git a/src/packages/NUnit.2.6.3/NUnit.2.6.3.nuspec b/src/packages/NUnit.2.6.3/NUnit.2.6.3.nuspec deleted file mode 100644 index 16ce830a..00000000 --- a/src/packages/NUnit.2.6.3/NUnit.2.6.3.nuspec +++ /dev/null @@ -1,31 +0,0 @@ - - - - NUnit - 2.6.3 - NUnit - Charlie Poole - Charlie Poole - http://nunit.org/nuget/license.html - http://nunit.org/ - http://nunit.org/nuget/nunit_32x32.png - false - NUnit features a fluent assert syntax, parameterized, generic and theory tests and is user-extensible. A number of runners, both from the NUnit project and by third parties, are able to execute NUnit tests. - -Version 2.6 is the seventh major release of this well-known and well-tested programming tool. - -This package includes only the framework assembly. You will need to install the NUnit.Runners package unless you are using a third-party runner. - NUnit is a unit-testing framework for all .Net languages with a strong TDD focus. - Version 2.6 is the seventh major release of NUnit. - -Unlike earlier versions, this package includes only the framework assembly. You will need to install the NUnit.Runners package unless you are using a third-party runner. - -The nunit.mocks assembly is now provided by the NUnit.Mocks package. The pnunit.framework assembly is provided by the pNUnit package. 
- - en-US - nunit test testing tdd framework fluent assert theory plugin addin - - - - - \ No newline at end of file diff --git a/src/packages/NUnit.2.6.3/lib/nunit.framework.dll b/src/packages/NUnit.2.6.3/lib/nunit.framework.dll deleted file mode 100644 index 780727f2..00000000 Binary files a/src/packages/NUnit.2.6.3/lib/nunit.framework.dll and /dev/null differ diff --git a/src/packages/NUnit.2.6.3/lib/nunit.framework.xml b/src/packages/NUnit.2.6.3/lib/nunit.framework.xml deleted file mode 100644 index f40847c7..00000000 --- a/src/packages/NUnit.2.6.3/lib/nunit.framework.xml +++ /dev/null @@ -1,10960 +0,0 @@ - - - - nunit.framework - - - - - The different targets a test action attribute can be applied to - - - - - Default target, which is determined by where the action attribute is attached - - - - - Target a individual test case - - - - - Target a suite of test cases - - - - - Delegate used by tests that execute code and - capture any thrown exception. - - - - - The Assert class contains a collection of static methods that - implement the most common assertions used in NUnit. - - - - - We don't actually want any instances of this object, but some people - like to inherit from it to add other static methods. Hence, the - protected constructor disallows any instances of this object. - - - - - The Equals method throws an AssertionException. This is done - to make sure there is no mistake by calling this function. - - - - - - - override the default ReferenceEquals to throw an AssertionException. This - implementation makes sure there is no mistake in calling this function - as part of Assert. - - - - - - - Throws a with the message and arguments - that are passed in. This allows a test to be cut short, with a result - of success returned to NUnit. - - The message to initialize the with. - Arguments to be used in formatting the message - - - - Throws a with the message and arguments - that are passed in. This allows a test to be cut short, with a result - of success returned to NUnit. - - The message to initialize the with. - - - - Throws a with the message and arguments - that are passed in. This allows a test to be cut short, with a result - of success returned to NUnit. - - - - - Throws an with the message and arguments - that are passed in. This is used by the other Assert functions. - - The message to initialize the with. - Arguments to be used in formatting the message - - - - Throws an with the message that is - passed in. This is used by the other Assert functions. - - The message to initialize the with. - - - - Throws an . - This is used by the other Assert functions. - - - - - Throws an with the message and arguments - that are passed in. This causes the test to be reported as ignored. - - The message to initialize the with. - Arguments to be used in formatting the message - - - - Throws an with the message that is - passed in. This causes the test to be reported as ignored. - - The message to initialize the with. - - - - Throws an . - This causes the test to be reported as ignored. - - - - - Throws an with the message and arguments - that are passed in. This causes the test to be reported as inconclusive. - - The message to initialize the with. - Arguments to be used in formatting the message - - - - Throws an with the message that is - passed in. This causes the test to be reported as inconclusive. - - The message to initialize the with. - - - - Throws an . - This causes the test to be reported as Inconclusive. 
- - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - - The actual value to test - A Constraint to be applied - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - - The actual value to test - A Constraint to be applied - The message that will be displayed on failure - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - - The actual value to test - A Constraint expression to be applied - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Asserts that a condition is true. If the condition is false the method throws - an . - - The evaluated condition - The message to display if the condition is false - Arguments to be used in formatting the message - - - - Asserts that a condition is true. If the condition is false the method throws - an . - - The evaluated condition - The message to display if the condition is false - - - - Asserts that a condition is true. If the condition is false the method throws - an . - - The evaluated condition - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - - An ActualValueDelegate returning the value to be tested - A Constraint expression to be applied - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - - An ActualValueDelegate returning the value to be tested - A Constraint expression to be applied - The message that will be displayed on failure - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - - An ActualValueDelegate returning the value to be tested - A Constraint expression to be applied - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Apply a constraint to a referenced value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - - The actual value to test - A Constraint to be applied - - - - Apply a constraint to a referenced value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - - The actual value to test - A Constraint to be applied - The message that will be displayed on failure - - - - Apply a constraint to a referenced value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - - The actual value to test - A Constraint to be applied - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Asserts that the code represented by a delegate throws an exception - that satisfies the constraint provided. - - A TestDelegate to be executed - A ThrowsConstraint used in the test - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - Used as a synonym for That in rare cases where a private setter - causes a Visual Basic compilation error. - - The actual value to test - A Constraint to be applied - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. 
- Used as a synonym for That in rare cases where a private setter - causes a Visual Basic compilation error. - - The actual value to test - A Constraint to be applied - The message that will be displayed on failure - - - - Apply a constraint to an actual value, succeeding if the constraint - is satisfied and throwing an assertion exception on failure. - Used as a synonym for That in rare cases where a private setter - causes a Visual Basic compilation error. - - - This method is provided for use by VB developers needing to test - the value of properties with private setters. - - The actual value to test - A Constraint expression to be applied - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Verifies that a delegate throws a particular exception when called. - - A constraint to be satisfied by the exception - A TestDelegate - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Verifies that a delegate throws a particular exception when called. - - A constraint to be satisfied by the exception - A TestDelegate - The message that will be displayed on failure - - - - Verifies that a delegate throws a particular exception when called. - - A constraint to be satisfied by the exception - A TestDelegate - - - - Verifies that a delegate throws a particular exception when called. - - The exception Type expected - A TestDelegate - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Verifies that a delegate throws a particular exception when called. - - The exception Type expected - A TestDelegate - The message that will be displayed on failure - - - - Verifies that a delegate throws a particular exception when called. - - The exception Type expected - A TestDelegate - - - - Verifies that a delegate throws a particular exception when called. - - Type of the expected exception - A TestDelegate - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Verifies that a delegate throws a particular exception when called. - - Type of the expected exception - A TestDelegate - The message that will be displayed on failure - - - - Verifies that a delegate throws a particular exception when called. - - Type of the expected exception - A TestDelegate - - - - Verifies that a delegate throws an exception when called - and returns it. - - A TestDelegate - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Verifies that a delegate throws an exception when called - and returns it. - - A TestDelegate - The message that will be displayed on failure - - - - Verifies that a delegate throws an exception when called - and returns it. - - A TestDelegate - - - - Verifies that a delegate throws an exception of a certain Type - or one derived from it when called and returns it. - - The expected Exception Type - A TestDelegate - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Verifies that a delegate throws an exception of a certain Type - or one derived from it when called and returns it. - - The expected Exception Type - A TestDelegate - The message that will be displayed on failure - - - - Verifies that a delegate throws an exception of a certain Type - or one derived from it when called and returns it. 
- - The expected Exception Type - A TestDelegate - - - - Verifies that a delegate throws an exception of a certain Type - or one derived from it when called and returns it. - - The expected Exception Type - A TestDelegate - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Verifies that a delegate throws an exception of a certain Type - or one derived from it when called and returns it. - - The expected Exception Type - A TestDelegate - The message that will be displayed on failure - - - - Verifies that a delegate throws an exception of a certain Type - or one derived from it when called and returns it. - - The expected Exception Type - A TestDelegate - - - - Verifies that a delegate does not throw an exception - - A TestDelegate - The message that will be displayed on failure - Arguments to be used in formatting the message - - - - Verifies that a delegate does not throw an exception. - - A TestDelegate - The message that will be displayed on failure - - - - Verifies that a delegate does not throw an exception. - - A TestDelegate - - - - Asserts that a condition is true. If the condition is false the method throws - an . - - The evaluated condition - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Asserts that a condition is true. If the condition is false the method throws - an . - - The evaluated condition - The message to display in case of failure - - - - Asserts that a condition is true. If the condition is false the method throws - an . - - The evaluated condition - - - - Asserts that a condition is true. If the condition is false the method throws - an . - - The evaluated condition - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Asserts that a condition is true. If the condition is false the method throws - an . - - The evaluated condition - The message to display in case of failure - - - - Asserts that a condition is true. If the condition is false the method throws - an . - - The evaluated condition - - - - Asserts that a condition is false. If the condition is true the method throws - an . - - The evaluated condition - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Asserts that a condition is false. If the condition is true the method throws - an . - - The evaluated condition - The message to display in case of failure - - - - Asserts that a condition is false. If the condition is true the method throws - an . - - The evaluated condition - - - - Asserts that a condition is false. If the condition is true the method throws - an . - - The evaluated condition - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Asserts that a condition is false. If the condition is true the method throws - an . - - The evaluated condition - The message to display in case of failure - - - - Asserts that a condition is false. If the condition is true the method throws - an . - - The evaluated condition - - - - Verifies that the object that is passed in is not equal to null - If the object is null then an - is thrown. - - The object that is to be tested - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that the object that is passed in is not equal to null - If the object is null then an - is thrown. 
- - The object that is to be tested - The message to display in case of failure - - - - Verifies that the object that is passed in is not equal to null - If the object is null then an - is thrown. - - The object that is to be tested - - - - Verifies that the object that is passed in is not equal to null - If the object is null then an - is thrown. - - The object that is to be tested - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that the object that is passed in is not equal to null - If the object is null then an - is thrown. - - The object that is to be tested - The message to display in case of failure - - - - Verifies that the object that is passed in is not equal to null - If the object is null then an - is thrown. - - The object that is to be tested - - - - Verifies that the object that is passed in is equal to null - If the object is not null then an - is thrown. - - The object that is to be tested - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that the object that is passed in is equal to null - If the object is not null then an - is thrown. - - The object that is to be tested - The message to display in case of failure - - - - Verifies that the object that is passed in is equal to null - If the object is not null then an - is thrown. - - The object that is to be tested - - - - Verifies that the object that is passed in is equal to null - If the object is not null then an - is thrown. - - The object that is to be tested - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that the object that is passed in is equal to null - If the object is not null then an - is thrown. - - The object that is to be tested - The message to display in case of failure - - - - Verifies that the object that is passed in is equal to null - If the object is not null then an - is thrown. - - The object that is to be tested - - - - Verifies that two ints are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two ints are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two ints are equal. If they are not, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two longs are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two longs are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two longs are equal. If they are not, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two unsigned ints are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two unsigned ints are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two unsigned ints are equal. 
If they are not, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two unsigned longs are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two unsigned longs are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two unsigned longs are equal. If they are not, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two decimals are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two decimals are equal. If they are not, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two decimals are equal. If they are not, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two doubles are equal considering a delta. If the - expected value is infinity then the delta value is ignored. If - they are not equal then an is - thrown. - - The expected value - The actual value - The maximum acceptable difference between the - the expected and the actual - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two doubles are equal considering a delta. If the - expected value is infinity then the delta value is ignored. If - they are not equal then an is - thrown. - - The expected value - The actual value - The maximum acceptable difference between the - the expected and the actual - The message to display in case of failure - - - - Verifies that two doubles are equal considering a delta. If the - expected value is infinity then the delta value is ignored. If - they are not equal then an is - thrown. - - The expected value - The actual value - The maximum acceptable difference between the - the expected and the actual - - - - Verifies that two doubles are equal considering a delta. If the - expected value is infinity then the delta value is ignored. If - they are not equal then an is - thrown. - - The expected value - The actual value - The maximum acceptable difference between the - the expected and the actual - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two doubles are equal considering a delta. If the - expected value is infinity then the delta value is ignored. If - they are not equal then an is - thrown. - - The expected value - The actual value - The maximum acceptable difference between the - the expected and the actual - The message to display in case of failure - - - - Verifies that two doubles are equal considering a delta. If the - expected value is infinity then the delta value is ignored. If - they are not equal then an is - thrown. - - The expected value - The actual value - The maximum acceptable difference between the - the expected and the actual - - - - Verifies that two objects are equal. Two objects are considered - equal if both are null, or if both have the same value. NUnit - has special semantics for some object types. - If they are not equal an is thrown. 
- - The value that is expected - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two objects are equal. Two objects are considered - equal if both are null, or if both have the same value. NUnit - has special semantics for some object types. - If they are not equal an is thrown. - - The value that is expected - The actual value - The message to display in case of failure - - - - Verifies that two objects are equal. Two objects are considered - equal if both are null, or if both have the same value. NUnit - has special semantics for some object types. - If they are not equal an is thrown. - - The value that is expected - The actual value - - - - Verifies that two ints are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two ints are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two ints are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two longs are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two longs are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two longs are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two unsigned ints are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two unsigned ints are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two unsigned ints are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two unsigned longs are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two unsigned longs are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two unsigned longs are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two decimals are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two decimals are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two decimals are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two floats are not equal. If they are equal, then an - is thrown. 
- - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two floats are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two floats are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two doubles are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two doubles are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - The message to display in case of failure - - - - Verifies that two doubles are not equal. If they are equal, then an - is thrown. - - The expected value - The actual value - - - - Verifies that two objects are not equal. Two objects are considered - equal if both are null, or if both have the same value. NUnit - has special semantics for some object types. - If they are equal an is thrown. - - The value that is expected - The actual value - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that two objects are not equal. Two objects are considered - equal if both are null, or if both have the same value. NUnit - has special semantics for some object types. - If they are equal an is thrown. - - The value that is expected - The actual value - The message to display in case of failure - - - - Verifies that two objects are not equal. Two objects are considered - equal if both are null, or if both have the same value. NUnit - has special semantics for some object types. - If they are equal an is thrown. - - The value that is expected - The actual value - - - - Asserts that two objects refer to the same object. If they - are not the same an is thrown. - - The expected object - The actual object - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Asserts that two objects refer to the same object. If they - are not the same an is thrown. - - The expected object - The actual object - The message to display in case of failure - - - - Asserts that two objects refer to the same object. If they - are not the same an is thrown. - - The expected object - The actual object - - - - Asserts that two objects do not refer to the same object. If they - are the same an is thrown. - - The expected object - The actual object - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Asserts that two objects do not refer to the same object. If they - are the same an is thrown. - - The expected object - The actual object - The message to display in case of failure - - - - Asserts that two objects do not refer to the same object. If they - are the same an is thrown. - - The expected object - The actual object - - - - Verifies that the double that is passed in is an NaN value. - If the object is not NaN then an - is thrown. - - The value that is to be tested - The message to display in case of failure - Array of objects to be used in formatting the message - - - - Verifies that the double that is passed in is an NaN value. - If the object is not NaN then an - is thrown. 
Member summaries from the bundled NUnit framework assertion API; value and type assertions:

- `Assert.IsNaN(double)` — verifies that the double passed in is an NaN value; if it is not, an AssertionException is thrown. Overloads accept an optional failure message and format arguments.
- `Assert.IsEmpty` / `Assert.IsNotEmpty` — assert that a string is (or is not) equal to `string.Empty`, or that an array, list or other collection implementing `ICollection` is (or is not) empty.
- `Assert.IsNullOrEmpty` / `Assert.IsNotNullOrEmpty` — assert that a string is (or is not) either null or equal to `string.Empty`.
- `Assert.IsAssignableFrom` / `Assert.IsNotAssignableFrom` — assert that an object may (or may not) be assigned a value of the given Type.
- `Assert.IsInstanceOf` / `Assert.IsNotInstanceOf` — assert that the object under examination is (or is not) an instance of a given type, with generic and `Type`-argument overloads.
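As a minimal illustration of how these assertions read in a test (assuming NUnit 2.x and invented sample values):

    using System.Collections.Generic;
    using NUnit.Framework;

    [TestFixture]
    public class ValueAndTypeAssertExamples
    {
        [Test]
        public void BasicChecks()
        {
            Assert.IsNaN(0.0 / 0.0);                          // double is NaN
            Assert.IsEmpty("");                               // string equals string.Empty
            Assert.IsNotEmpty(new List<int> { 1, 2 });        // collection has members
            Assert.IsNullOrEmpty((string)null);               // null or empty string
            Assert.IsInstanceOf<List<int>>(new List<int>());  // instance of the given type
        }
    }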
Comparison assertions (each has overloads for the numeric types and `IComparable`, with optional failure message and format arguments):

- `Assert.Greater(arg1, arg2)` — verifies that the first value is greater than the second value; if it is not, an AssertionException is thrown. The first argument is the value expected to be greater, the second the value expected to be less.
- `Assert.Less(arg1, arg2)` — verifies that the first value is less than the second value.
- `Assert.GreaterOrEqual(arg1, arg2)` / `Assert.LessOrEqual(arg1, arg2)` — verify that the first value is greater than or equal to (respectively less than or equal to) the second value.
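A corresponding sketch for the comparison assertions (invented values; any numeric overload works the same way):

    // using NUnit.Framework; inside a [TestFixture] class
    [Test]
    public void ComparisonChecks()
    {
        Assert.Greater(10, 1);     // first argument expected to be greater
        Assert.Less(1, 10);        // first argument expected to be less
        Assert.GreaterOrEqual(5, 5);
        Assert.LessOrEqual(5.0, 5.5, "custom failure message with {0}", "format args");
    }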
- `Assert.Contains(expected, list)` — asserts that an object is contained in a list.
- A helper for `Assert.AreEqual(double expected, double actual, ...)` compares doubles with a maximum acceptable difference between the expected and actual values, allowing code generation to work consistently.
- `Assert.Counter` — gets the number of assertions executed so far and resets the counter to zero.
- `AssertionHelper` — an optional base class for user tests, allowing the use of shorter names for constraints and asserts while avoiding conflicts with definitions in certain mock object frameworks.
- A helper class supplies the constraints used in asserts, including expressions that apply a following constraint to all members of a collection (succeeding only if a specified number of them succeed), to a named property of the object under test, or that check for the presence of a particular attribute on an object.
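For example (a sketch with invented values), the list-containment assertion and the double-with-delta helper surface as:

    // using System.Collections.Generic; using NUnit.Framework; inside a [TestFixture] class
    [Test]
    public void ContainsAndDoubleTolerance()
    {
        var items = new List<string> { "redis", "cache" };
        Assert.Contains("redis", items);           // object contained in a list
        Assert.AreEqual(0.333, 1.0 / 3.0, 0.001);  // doubles equal within a maximum difference
    }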
Constraint factory members return constraints that test: two items for equality; that two references are the same object; that the actual value is greater than, at least, less than, or at most the supplied argument; that it is of the exact type supplied, of that type or a derived type, or assignable from it; that a collection contains the same elements as another, is a subset of it, or contains a particular object; that a string contains, starts with, ends with, or matches a regular expression (with negated variants of each); that a path is the same as, or under, an expected path after canonicalization; and that a value falls within a specified range. Further expressions negate the following constraint, apply it to all, some, or none of a collection's members, or apply it to the `Length`, `Count`, `Message`, or `InnerException` property of the object under test; there are also constraints for null, true, false, positive, negative, NaN, empty, unique collection items, binary and XML serializability, and ordered collections.
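These factory members are what the constraint-based syntax is built from; a minimal sketch (invented values, assuming NUnit 2.5 or later):

    // using NUnit.Framework; inside a [TestFixture] class
    [Test]
    public void ConstraintChecks()
    {
        var numbers = new[] { 1, 2, 3 };

        Assert.That(numbers, Is.Unique);                             // all items unique
        Assert.That(numbers, Is.Ordered);                            // collection is ordered
        Assert.That(numbers, Has.Length.EqualTo(3));                 // constraint on the Length property
        Assert.That(2, Is.GreaterThan(1).And.LessThanOrEqualTo(2));  // combined comparison constraints
        Assert.That("redis", Is.Not.Empty);                          // negated constraint
    }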
`AssertionHelper.Expect` applies a constraint to an actual value, succeeding if the constraint is satisfied and throwing an assertion exception on failure, working identically to `Assert.That`; overloads accept boolean conditions, value-returning delegates, referenced values, failure messages with format arguments, and a delegate expected to throw an exception satisfying a `ThrowsConstraint`. A `Map` helper returns a ListMapper based on a collection.

`Assume` provides static methods to express the assumptions that must be met for a test to give a meaningful result; if an assumption is not met, the test should produce an inconclusive result. Its `Equals` and `ReferenceEquals` overrides throw an AssertionException to make sure they are not called by mistake, and `Assume.That` mirrors the `Assert.That` overloads (constraints, boolean conditions, delegates, referenced values, messages and format arguments) but throws an InconclusiveException on failure.
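A short sketch of how an assumption gates a test (invented condition):

    // using NUnit.Framework; inside a [TestFixture] class
    [Test]
    public void OnlyMeaningfulWithAHighResolutionClock()
    {
        // If the assumption fails, the test is reported as inconclusive rather than failed
        Assume.That(System.Diagnostics.Stopwatch.IsHighResolution, Is.True);

        Assert.That(System.Diagnostics.Stopwatch.GetTimestamp(), Is.GreaterThan(0));
    }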
A helper waits for pending asynchronous operations to complete, if appropriate, and returns a proper result of the invocation by unwrapping task results.

`CollectionAssert` is a set of Assert methods operating on one or more collections; its `Equals` and `ReferenceEquals` overrides throw an AssertionException to make sure they are not called by mistake. All methods accept an optional failure message and format arguments:

- `AllItemsAreInstancesOfType(collection, expectedType)` — asserts that all items in the collection are instances of the specified type.
- `AllItemsAreNotNull(collection)` — asserts that no item in the collection is null.
- `AllItemsAreUnique(collection)` — ensures that every object in the collection occurs once and only once.
- `AreEqual(expected, actual)` / `AreNotEqual(expected, actual)` — assert that two collections are (or are not) exactly equal: the same count and the exact same objects in the same order. If an `IComparer` is supplied, it is used to compare the objects.
- `AreEquivalent(expected, actual)` / `AreNotEquivalent(expected, actual)` — assert that the collections contain (or do not contain) the same objects, in any order.
- `Contains(collection, item)` / `DoesNotContain(collection, item)` — assert that the collection does (or does not) contain the item.
- `IsSubsetOf(subset, superset)` / `IsNotSubsetOf(subset, superset)` — assert that one collection is (or is not) a subset of the other.
- `IsEmpty(collection)` / `IsNotEmpty(collection)` — assert that an array, list or other collection implementing `IEnumerable` is (or is not) empty.
- `IsOrdered(collection)` — asserts that the collection is ordered, optionally using a custom comparer to perform the comparisons.

A related helper class supplies constraints used in asserts, including a CollectionContainsConstraint checking for the presence of a particular object in a collection and a constraint that succeeds if the actual value contains a supplied substring.
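Taken together, a minimal CollectionAssert sketch (invented data):

    // using System.Collections.Generic; using NUnit.Framework; inside a [TestFixture] class
    [Test]
    public void CollectionChecks()
    {
        var expected = new[] { "a", "b", "c" };
        var actual   = new List<string> { "a", "b", "c" };

        CollectionAssert.AreEqual(expected, actual);                      // same items, same order
        CollectionAssert.AreEquivalent(new[] { "c", "b", "a" }, actual);  // same items, any order
        CollectionAssert.AllItemsAreUnique(actual);
        CollectionAssert.Contains(actual, "b");
        CollectionAssert.IsSubsetOf(new[] { "a", "c" }, actual);          // subset first, superset second
        CollectionAssert.IsOrdered(actual);                               // natural string ordering
    }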
`DirectoryAssert` provides directory-level assertions; its `Equals` and `ReferenceEquals` overrides throw an AssertionException, and a protected constructor disallows instances (the class exists only so users can inherit from it to add other static methods). Overloads take `DirectoryInfo` arguments or directory path strings, plus an optional failure message and format arguments:

- `AreEqual` / `AreNotEqual` — verify that two directories are (or are not) equal; two directories are considered equal if both are null, or if both have the same value byte for byte.
- `IsEmpty` / `IsNotEmpty` — assert that the directory is (or is not) empty.
- `IsWithin` / `IsNotWithin` — assert that a path does (or does not) contain the given sub-directory.

`FileAssert` follows the same pattern (throwing `Equals`/`ReferenceEquals` overrides and a protected constructor). `FileAssert.AreEqual` verifies that two Streams, two files, or two file paths are equal: they are considered equal if both are null, or if both have the same value byte for byte; if they are not equal, an AssertionException is thrown.
- - The path to a file containing the value that is expected - The path to a file containing the actual value - The message to display if Streams are not equal - Arguments to be used in formatting the message - - - - Verifies that two files are equal. Two files are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - The message to display if objects are not equal - - - - Verifies that two files are equal. Two files are considered - equal if both are null, or if both have the same value byte for byte. - If they are not equal an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - - - - Asserts that two Streams are not equal. If they are equal - an is thrown. - - The expected Stream - The actual Stream - The message to be displayed when the two Stream are the same. - Arguments to be used in formatting the message - - - - Asserts that two Streams are not equal. If they are equal - an is thrown. - - The expected Stream - The actual Stream - The message to be displayed when the Streams are the same. - - - - Asserts that two Streams are not equal. If they are equal - an is thrown. - - The expected Stream - The actual Stream - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - A file containing the value that is expected - A file containing the actual value - The message to display if Streams are not equal - Arguments to be used in formatting the message - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - A file containing the value that is expected - A file containing the actual value - The message to display if objects are not equal - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - A file containing the value that is expected - A file containing the actual value - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - The message to display if Streams are not equal - Arguments to be used in formatting the message - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - The message to display if objects are not equal - - - - Asserts that two files are not equal. If they are equal - an is thrown. - - The path to a file containing the value that is expected - The path to a file containing the actual value - - - - GlobalSettings is a place for setting default values used - by the framework in performing asserts. - - - - - Default tolerance for floating point equality - - - - - Class used to guard against unexpected argument values - by throwing an appropriate exception. - - - - - Throws an exception if an argument is null - - The value to be tested - The name of the argument - - - - Throws an exception if a string argument is null or empty - - The value to be tested - The name of the argument - - - - Helper class with properties and methods that supply - a number of constraints used in Asserts. - - - - - Returns a ConstraintExpression, which will apply - the following constraint to all members of a collection, - succeeding only if a specified number of them succeed. 
- - - - - Returns a new PropertyConstraintExpression, which will either - test for the existence of the named property on the object - being tested or apply any following constraint to that property. - - - - - Returns a new AttributeConstraint checking for the - presence of a particular attribute on an object. - - - - - Returns a new AttributeConstraint checking for the - presence of a particular attribute on an object. - - - - - Returns a new CollectionContainsConstraint checking for the - presence of a particular object in the collection. - - - - - Returns a ConstraintExpression that negates any - following constraint. - - - - - Returns a ConstraintExpression, which will apply - the following constraint to all members of a collection, - succeeding if all of them succeed. - - - - - Returns a ConstraintExpression, which will apply - the following constraint to all members of a collection, - succeeding if at least one of them succeeds. - - - - - Returns a ConstraintExpression, which will apply - the following constraint to all members of a collection, - succeeding if all of them fail. - - - - - Returns a new ConstraintExpression, which will apply the following - constraint to the Length property of the object being tested. - - - - - Returns a new ConstraintExpression, which will apply the following - constraint to the Count property of the object being tested. - - - - - Returns a new ConstraintExpression, which will apply the following - constraint to the Message property of the object being tested. - - - - - Returns a new ConstraintExpression, which will apply the following - constraint to the InnerException property of the object being tested. - - - - - Interface implemented by a user fixture in order to - validate any expected exceptions. It is only called - for test methods marked with the ExpectedException - attribute. - - - - - Method to handle an expected exception - - The exception to be handled - - - - Helper class with properties and methods that supply - a number of constraints used in Asserts. - - - - - Returns a constraint that tests two items for equality - - - - - Returns a constraint that tests that two references are the same object - - - - - Returns a constraint that tests whether the - actual value is greater than the suppled argument - - - - - Returns a constraint that tests whether the - actual value is greater than or equal to the suppled argument - - - - - Returns a constraint that tests whether the - actual value is greater than or equal to the suppled argument - - - - - Returns a constraint that tests whether the - actual value is less than the suppled argument - - - - - Returns a constraint that tests whether the - actual value is less than or equal to the suppled argument - - - - - Returns a constraint that tests whether the - actual value is less than or equal to the suppled argument - - - - - Returns a constraint that tests whether the actual - value is of the exact type supplied as an argument. - - - - - Returns a constraint that tests whether the actual - value is of the exact type supplied as an argument. - - - - - Returns a constraint that tests whether the actual value - is of the type supplied as an argument or a derived type. - - - - - Returns a constraint that tests whether the actual value - is of the type supplied as an argument or a derived type. - - - - - Returns a constraint that tests whether the actual value - is of the type supplied as an argument or a derived type. 
- - - - - Returns a constraint that tests whether the actual value - is of the type supplied as an argument or a derived type. - - - - - Returns a constraint that tests whether the actual value - is assignable from the type supplied as an argument. - - - - - Returns a constraint that tests whether the actual value - is assignable from the type supplied as an argument. - - - - - Returns a constraint that tests whether the actual value - is assignable from the type supplied as an argument. - - - - - Returns a constraint that tests whether the actual value - is assignable from the type supplied as an argument. - - - - - Returns a constraint that tests whether the actual value - is a collection containing the same elements as the - collection supplied as an argument. - - - - - Returns a constraint that tests whether the actual value - is a subset of the collection supplied as an argument. - - - - - Returns a constraint that succeeds if the actual - value contains the substring supplied as an argument. - - - - - Returns a constraint that succeeds if the actual - value starts with the substring supplied as an argument. - - - - - Returns a constraint that succeeds if the actual - value ends with the substring supplied as an argument. - - - - - Returns a constraint that succeeds if the actual - value matches the regular expression supplied as an argument. - - - - - Returns a constraint that tests whether the path provided - is the same as an expected path after canonicalization. - - - - - Returns a constraint that tests whether the path provided - is under an expected path after canonicalization. - - - - - Returns a constraint that tests whether the path provided - is the same path or under an expected path after canonicalization. - - - - - Returns a constraint that tests whether the actual value falls - within a specified range. - - - - - Returns a ConstraintExpression that negates any - following constraint. - - - - - Returns a ConstraintExpression, which will apply - the following constraint to all members of a collection, - succeeding if all of them succeed. - - - - - Returns a constraint that tests for null - - - - - Returns a constraint that tests for True - - - - - Returns a constraint that tests for False - - - - - Returns a constraint that tests for a positive value - - - - - Returns a constraint that tests for a negative value - - - - - Returns a constraint that tests for NaN - - - - - Returns a constraint that tests for empty - - - - - Returns a constraint that tests whether a collection - contains all unique items. - - - - - Returns a constraint that tests whether an object graph is serializable in binary format. - - - - - Returns a constraint that tests whether an object graph is serializable in xml format. - - - - - Returns a constraint that tests whether a collection is ordered - - - - - The ITestCaseData interface is implemented by a class - that is able to return complete testcases for use by - a parameterized test method. - - NOTE: This interface is used in both the framework - and the core, even though that results in two different - types. However, sharing the source code guarantees that - the various implementations will be compatible and that - the core is able to reflect successfully over the - framework implementations of ITestCaseData. - - - - - Gets the argument list to be provided to the test - - - - - Gets the expected result - - - - - Indicates whether a result has been specified. - This is necessary because the result may be - null, so it's value cannot be checked. 
- - - - - Gets the expected exception Type - - - - - Gets the FullName of the expected exception - - - - - Gets the name to be used for the test - - - - - Gets the description of the test - - - - - Gets a value indicating whether this is ignored. - - true if ignored; otherwise, false. - - - - Gets a value indicating whether this is explicit. - - true if explicit; otherwise, false. - - - - Gets the ignore reason. - - The ignore reason. - - - - The Iz class is a synonym for Is intended for use in VB, - which regards Is as a keyword. - - - - - The List class is a helper class with properties and methods - that supply a number of constraints used with lists and collections. - - - - - List.Map returns a ListMapper, which can be used to map - the original collection to another collection. - - - - - - - ListMapper is used to transform a collection used as an actual argument - producing another collection to be used in the assertion. - - - - - Construct a ListMapper based on a collection - - The collection to be transformed - - - - Produces a collection containing all the values of a property - - The collection of property values - - - - - Randomizer returns a set of random values in a repeatable - way, to allow re-running of tests if necessary. - - - - - Get a randomizer for a particular member, returning - one that has already been created if it exists. - This ensures that the same values are generated - each time the tests are reloaded. - - - - - Get a randomizer for a particular parameter, returning - one that has already been created if it exists. - This ensures that the same values are generated - each time the tests are reloaded. - - - - - Construct a randomizer using a random seed - - - - - Construct a randomizer using a specified seed - - - - - Return an array of random doubles between 0.0 and 1.0. - - - - - - - Return an array of random doubles with values in a specified range. - - - - - Return an array of random ints with values in a specified range. - - - - - Get a random seed for use in creating a randomizer. - - - - - The SpecialValue enum is used to represent TestCase arguments - that cannot be used as arguments to an Attribute. - - - - - Null represents a null value, which cannot be used as an - argument to an attribute under .NET 1.x - - - - - Basic Asserts on strings. - - - - - The Equals method throws an AssertionException. This is done - to make sure there is no mistake by calling this function. - - - - - - - override the default ReferenceEquals to throw an AssertionException. This - implementation makes sure there is no mistake in calling this function - as part of Assert. - - - - - - - Asserts that a string is found within another string. - - The expected string - The string to be examined - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string is found within another string. - - The expected string - The string to be examined - The message to display in case of failure - - - - Asserts that a string is found within another string. - - The expected string - The string to be examined - - - - Asserts that a string is not found within another string. - - The expected string - The string to be examined - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string is found within another string. - - The expected string - The string to be examined - The message to display in case of failure - - - - Asserts that a string is found within another string. 
- - The expected string - The string to be examined - - - - Asserts that a string starts with another string. - - The expected string - The string to be examined - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string starts with another string. - - The expected string - The string to be examined - The message to display in case of failure - - - - Asserts that a string starts with another string. - - The expected string - The string to be examined - - - - Asserts that a string does not start with another string. - - The expected string - The string to be examined - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string does not start with another string. - - The expected string - The string to be examined - The message to display in case of failure - - - - Asserts that a string does not start with another string. - - The expected string - The string to be examined - - - - Asserts that a string ends with another string. - - The expected string - The string to be examined - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string ends with another string. - - The expected string - The string to be examined - The message to display in case of failure - - - - Asserts that a string ends with another string. - - The expected string - The string to be examined - - - - Asserts that a string does not end with another string. - - The expected string - The string to be examined - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string does not end with another string. - - The expected string - The string to be examined - The message to display in case of failure - - - - Asserts that a string does not end with another string. - - The expected string - The string to be examined - - - - Asserts that two strings are equal, without regard to case. - - The expected string - The actual string - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that two strings are equal, without regard to case. - - The expected string - The actual string - The message to display in case of failure - - - - Asserts that two strings are equal, without regard to case. - - The expected string - The actual string - - - - Asserts that two strings are not equal, without regard to case. - - The expected string - The actual string - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that two strings are Notequal, without regard to case. - - The expected string - The actual string - The message to display in case of failure - - - - Asserts that two strings are not equal, without regard to case. - - The expected string - The actual string - - - - Asserts that a string matches an expected regular expression pattern. - - The regex pattern to be matched - The actual string - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string matches an expected regular expression pattern. - - The regex pattern to be matched - The actual string - The message to display in case of failure - - - - Asserts that a string matches an expected regular expression pattern. - - The regex pattern to be matched - The actual string - - - - Asserts that a string does not match an expected regular expression pattern. 
- - The regex pattern to be used - The actual string - The message to display in case of failure - Arguments used in formatting the message - - - - Asserts that a string does not match an expected regular expression pattern. - - The regex pattern to be used - The actual string - The message to display in case of failure - - - - Asserts that a string does not match an expected regular expression pattern. - - The regex pattern to be used - The actual string - - - - The TestCaseData class represents a set of arguments - and other parameter info to be used for a parameterized - test case. It provides a number of instance modifiers - for use in initializing the test case. - - Note: Instance modifiers are getters that return - the same instance after modifying it's state. - - - - - The argument list to be provided to the test - - - - - The expected result to be returned - - - - - Set to true if this has an expected result - - - - - The expected exception Type - - - - - The FullName of the expected exception - - - - - The name to be used for the test - - - - - The description of the test - - - - - A dictionary of properties, used to add information - to tests without requiring the class to change. - - - - - If true, indicates that the test case is to be ignored - - - - - If true, indicates that the test case is marked explicit - - - - - The reason for ignoring a test case - - - - - Initializes a new instance of the class. - - The arguments. - - - - Initializes a new instance of the class. - - The argument. - - - - Initializes a new instance of the class. - - The first argument. - The second argument. - - - - Initializes a new instance of the class. - - The first argument. - The second argument. - The third argument. - - - - Sets the expected result for the test - - The expected result - A modified TestCaseData - - - - Sets the expected exception type for the test - - Type of the expected exception. - The modified TestCaseData instance - - - - Sets the expected exception type for the test - - FullName of the expected exception. - The modified TestCaseData instance - - - - Sets the name of the test case - - The modified TestCaseData instance - - - - Sets the description for the test case - being constructed. - - The description. - The modified TestCaseData instance. - - - - Applies a category to the test - - - - - - - Applies a named property to the test - - - - - - - - Applies a named property to the test - - - - - - - - Applies a named property to the test - - - - - - - - Ignores this TestCase. - - - - - - Ignores this TestCase, specifying the reason. - - The reason. - - - - - Marks this TestCase as Explicit - - - - - - Marks this TestCase as Explicit, specifying the reason. - - The reason. - - - - - Gets the argument list to be provided to the test - - - - - Gets the expected result - - - - - Returns true if the result has been set - - - - - Gets the expected exception Type - - - - - Gets the FullName of the expected exception - - - - - Gets the name to be used for the test - - - - - Gets the description of the test - - - - - Gets a value indicating whether this is ignored. - - true if ignored; otherwise, false. - - - - Gets a value indicating whether this is explicit. - - true if explicit; otherwise, false. - - - - Gets the ignore reason. - - The ignore reason. - - - - Gets a list of categories associated with this test. 
- - - - - Gets the property dictionary for this test - - - - - Provide the context information of the current test - - - - - Constructs a TestContext using the provided context dictionary - - A context dictionary - - - - Get the current test context. This is created - as needed. The user may save the context for - use within a test, but it should not be used - outside the test for which it is created. - - - - - Gets a TestAdapter representing the currently executing test in this context. - - - - - Gets a ResultAdapter representing the current result for the test - executing in this context. - - - - - Gets the directory containing the current test assembly. - - - - - Gets the directory to be used for outputing files created - by this test run. - - - - - TestAdapter adapts a Test for consumption by - the user test code. - - - - - Constructs a TestAdapter for this context - - The context dictionary - - - - The name of the test. - - - - - The FullName of the test - - - - - The properties of the test. - - - - - ResultAdapter adapts a TestResult for consumption by - the user test code. - - - - - Construct a ResultAdapter for a context - - The context holding the result - - - - The TestState of current test. This maps to the ResultState - used in nunit.core and is subject to change in the future. - - - - - The TestStatus of current test. This enum will be used - in future versions of NUnit and so is to be preferred - to the TestState value. - - - - - Provides details about a test - - - - - Creates an instance of TestDetails - - The fixture that the test is a member of, if available. - The method that implements the test, if available. - The full name of the test. - A string representing the type of test, e.g. "Test Case". - Indicates if the test represents a suite of tests. - - - - The fixture that the test is a member of, if available. - - - - - The method that implements the test, if available. - - - - - The full name of the test. - - - - - A string representing the type of test, e.g. "Test Case". - - - - - Indicates if the test represents a suite of tests. - - - - - The ResultState enum indicates the result of running a test - - - - - The result is inconclusive - - - - - The test was not runnable. - - - - - The test has been skipped. - - - - - The test has been ignored. - - - - - The test succeeded - - - - - The test failed - - - - - The test encountered an unexpected exception - - - - - The test was cancelled by the user - - - - - The TestStatus enum indicates the result of running a test - - - - - The test was inconclusive - - - - - The test has skipped - - - - - The test succeeded - - - - - The test failed - - - - - Helper class with static methods used to supply constraints - that operate on strings. - - - - - Returns a constraint that succeeds if the actual - value contains the substring supplied as an argument. - - - - - Returns a constraint that fails if the actual - value contains the substring supplied as an argument. - - - - - Returns a constraint that succeeds if the actual - value starts with the substring supplied as an argument. - - - - - Returns a constraint that fails if the actual - value starts with the substring supplied as an argument. - - - - - Returns a constraint that succeeds if the actual - value ends with the substring supplied as an argument. - - - - - Returns a constraint that fails if the actual - value ends with the substring supplied as an argument. 
- - - - - Returns a constraint that succeeds if the actual - value matches the Regex pattern supplied as an argument. - - - - - Returns a constraint that fails if the actual - value matches the pattern supplied as an argument. - - - - - Returns a ConstraintExpression, which will apply - the following constraint to all members of a collection, - succeeding if all of them succeed. - - - - - TextMessageWriter writes constraint descriptions and messages - in displayable form as a text stream. It tailors the display - of individual message components to form the standard message - format of NUnit assertion failure messages. - - - - - MessageWriter is the abstract base for classes that write - constraint descriptions and messages in some form. The - class has separate methods for writing various components - of a message, allowing implementations to tailor the - presentation as needed. - - - - - Construct a MessageWriter given a culture - - - - - Method to write single line message with optional args, usually - written to precede the general failure message. - - The message to be written - Any arguments used in formatting the message - - - - Method to write single line message with optional args, usually - written to precede the general failure message, at a givel - indentation level. - - The indentation level of the message - The message to be written - Any arguments used in formatting the message - - - - Display Expected and Actual lines for a constraint. This - is called by MessageWriter's default implementation of - WriteMessageTo and provides the generic two-line display. - - The constraint that failed - - - - Display Expected and Actual lines for given values. This - method may be called by constraints that need more control over - the display of actual and expected values than is provided - by the default implementation. - - The expected value - The actual value causing the failure - - - - Display Expected and Actual lines for given values, including - a tolerance value on the Expected line. - - The expected value - The actual value causing the failure - The tolerance within which the test was made - - - - Display the expected and actual string values on separate lines. - If the mismatch parameter is >=0, an additional line is displayed - line containing a caret that points to the mismatch point. - - The expected string value - The actual string value - The point at which the strings don't match or -1 - If true, case is ignored in locating the point where the strings differ - If true, the strings should be clipped to fit the line - - - - Writes the text for a connector. - - The connector. - - - - Writes the text for a predicate. - - The predicate. - - - - Writes the text for an expected value. - - The expected value. - - - - Writes the text for a modifier - - The modifier. - - - - Writes the text for an actual value. - - The actual value. - - - - Writes the text for a generalized value. - - The value. - - - - Writes the text for a collection value, - starting at a particular point, to a max length - - The collection containing elements to write. - The starting point of the elements to write - The maximum number of elements to write - - - - Abstract method to get the max line length - - - - - Prefix used for the expected value line of a message - - - - - Prefix used for the actual value line of a message - - - - - Length of a message prefix - - - - - Construct a TextMessageWriter - - - - - Construct a TextMessageWriter, specifying a user message - and optional formatting arguments. 
- - - - - - - Method to write single line message with optional args, usually - written to precede the general failure message, at a givel - indentation level. - - The indentation level of the message - The message to be written - Any arguments used in formatting the message - - - - Display Expected and Actual lines for a constraint. This - is called by MessageWriter's default implementation of - WriteMessageTo and provides the generic two-line display. - - The constraint that failed - - - - Display Expected and Actual lines for given values. This - method may be called by constraints that need more control over - the display of actual and expected values than is provided - by the default implementation. - - The expected value - The actual value causing the failure - - - - Display Expected and Actual lines for given values, including - a tolerance value on the expected line. - - The expected value - The actual value causing the failure - The tolerance within which the test was made - - - - Display the expected and actual string values on separate lines. - If the mismatch parameter is >=0, an additional line is displayed - line containing a caret that points to the mismatch point. - - The expected string value - The actual string value - The point at which the strings don't match or -1 - If true, case is ignored in string comparisons - If true, clip the strings to fit the max line length - - - - Writes the text for a connector. - - The connector. - - - - Writes the text for a predicate. - - The predicate. - - - - Write the text for a modifier. - - The modifier. - - - - Writes the text for an expected value. - - The expected value. - - - - Writes the text for an actual value. - - The actual value. - - - - Writes the text for a generalized value. - - The value. - - - - Writes the text for a collection value, - starting at a particular point, to a max length - - The collection containing elements to write. - The starting point of the elements to write - The maximum number of elements to write - - - - Write the generic 'Expected' line for a constraint - - The constraint that failed - - - - Write the generic 'Expected' line for a given value - - The expected value - - - - Write the generic 'Expected' line for a given value - and tolerance. - - The expected value - The tolerance within which the test was made - - - - Write the generic 'Actual' line for a constraint - - The constraint for which the actual value is to be written - - - - Write the generic 'Actual' line for a given value - - The actual value causing a failure - - - - Gets or sets the maximum line length for this writer - - - - - Helper class with properties and methods that supply - constraints that operate on exceptions. 
- - - - - Creates a constraint specifying the exact type of exception expected - - - - - Creates a constraint specifying the exact type of exception expected - - - - - Creates a constraint specifying the type of exception expected - - - - - Creates a constraint specifying the type of exception expected - - - - - Creates a constraint specifying an expected exception - - - - - Creates a constraint specifying an exception with a given InnerException - - - - - Creates a constraint specifying an expected TargetInvocationException - - - - - Creates a constraint specifying an expected TargetInvocationException - - - - - Creates a constraint specifying an expected TargetInvocationException - - - - - Creates a constraint specifying that no exception is thrown - - - - - Attribute used to apply a category to a test - - - - - The name of the category - - - - - Construct attribute for a given category based on - a name. The name may not contain the characters ',', - '+', '-' or '!'. However, this is not checked in the - constructor since it would cause an error to arise at - as the test was loaded without giving a clear indication - of where the problem is located. The error is handled - in NUnitFramework.cs by marking the test as not - runnable. - - The name of the category - - - - Protected constructor uses the Type name as the name - of the category. - - - - - The name of the category - - - - - Used to mark a field for use as a datapoint when executing a theory - within the same fixture that requires an argument of the field's Type. - - - - - Used to mark an array as containing a set of datapoints to be used - executing a theory within the same fixture that requires an argument - of the Type of the array elements. - - - - - Attribute used to provide descriptive text about a - test case or fixture. - - - - - Construct the attribute - - Text describing the test - - - - Gets the test description - - - - - Enumeration indicating how the expected message parameter is to be used - - - - Expect an exact match - - - Expect a message containing the parameter string - - - Match the regular expression provided as a parameter - - - Expect a message that starts with the parameter string - - - - ExpectedExceptionAttribute - - - - - - Constructor for a non-specific exception - - - - - Constructor for a given type of exception - - The type of the expected exception - - - - Constructor for a given exception name - - The full name of the expected exception - - - - Gets or sets the expected exception type - - - - - Gets or sets the full Type name of the expected exception - - - - - Gets or sets the expected message text - - - - - Gets or sets the user message displayed in case of failure - - - - - Gets or sets the type of match to be performed on the expected message - - - - - Gets the name of a method to be used as an exception handler - - - - - ExplicitAttribute marks a test or test fixture so that it will - only be run if explicitly executed from the gui or command line - or if it is included by use of a filter. The test will not be - run simply because an enclosing suite is run. - - - - - Default constructor - - - - - Constructor with a reason - - The reason test is marked explicit - - - - The reason test is marked explicit - - - - - Attribute used to mark a test that is to be ignored. - Ignored tests result in a warning message when the - tests are run. - - - - - Constructs the attribute without giving a reason - for ignoring the test. 
- - - - - Constructs the attribute giving a reason for ignoring the test - - The reason for ignoring the test - - - - The reason for ignoring a test - - - - - Abstract base for Attributes that are used to include tests - in the test run based on environmental settings. - - - - - Constructor with no included items specified, for use - with named property syntax. - - - - - Constructor taking one or more included items - - Comma-delimited list of included items - - - - Name of the item that is needed in order for - a test to run. Multiple itemss may be given, - separated by a comma. - - - - - Name of the item to be excluded. Multiple items - may be given, separated by a comma. - - - - - The reason for including or excluding the test - - - - - PlatformAttribute is used to mark a test fixture or an - individual method as applying to a particular platform only. - - - - - Constructor with no platforms specified, for use - with named property syntax. - - - - - Constructor taking one or more platforms - - Comma-deliminted list of platforms - - - - CultureAttribute is used to mark a test fixture or an - individual method as applying to a particular Culture only. - - - - - Constructor with no cultures specified, for use - with named property syntax. - - - - - Constructor taking one or more cultures - - Comma-deliminted list of cultures - - - - Marks a test to use a combinatorial join of any argument data - provided. NUnit will create a test case for every combination of - the arguments provided. This can result in a large number of test - cases and so should be used judiciously. This is the default join - type, so the attribute need not be used except as documentation. - - - - - PropertyAttribute is used to attach information to a test as a name/value pair.. - - - - - Construct a PropertyAttribute with a name and string value - - The name of the property - The property value - - - - Construct a PropertyAttribute with a name and int value - - The name of the property - The property value - - - - Construct a PropertyAttribute with a name and double value - - The name of the property - The property value - - - - Constructor for derived classes that set the - property dictionary directly. - - - - - Constructor for use by derived classes that use the - name of the type as the property name. Derived classes - must ensure that the Type of the property value is - a standard type supported by the BCL. Any custom - types will cause a serialization Exception when - in the client. - - - - - Gets the property dictionary for this attribute - - - - - Default constructor - - - - - Marks a test to use pairwise join of any argument data provided. - NUnit will attempt too excercise every pair of argument values at - least once, using as small a number of test cases as it can. With - only two arguments, this is the same as a combinatorial join. - - - - - Default constructor - - - - - Marks a test to use a sequential join of any argument data - provided. NUnit will use arguements for each parameter in - sequence, generating test cases up to the largest number - of argument values provided and using null for any arguments - for which it runs out of values. Normally, this should be - used with the same number of arguments for each parameter. - - - - - Default constructor - - - - - Summary description for MaxTimeAttribute. - - - - - Construct a MaxTimeAttribute, given a time in milliseconds. 
- - The maximum elapsed time in milliseconds - - - - RandomAttribute is used to supply a set of random values - to a single parameter of a parameterized test. - - - - - ValuesAttribute is used to provide literal arguments for - an individual parameter of a test. - - - - - Abstract base class for attributes that apply to parameters - and supply data for the parameter. - - - - - Gets the data to be provided to the specified parameter - - - - - The collection of data to be returned. Must - be set by any derived attribute classes. - We use an object[] so that the individual - elements may have their type changed in GetData - if necessary. - - - - - Construct with one argument - - - - - - Construct with two arguments - - - - - - - Construct with three arguments - - - - - - - - Construct with an array of arguments - - - - - - Get the collection of values to be used as arguments - - - - - Construct a set of doubles from 0.0 to 1.0, - specifying only the count. - - - - - - Construct a set of doubles from min to max - - - - - - - - Construct a set of ints from min to max - - - - - - - - Get the collection of values to be used as arguments - - - - - RangeAttribute is used to supply a range of values to an - individual parameter of a parameterized test. - - - - - Construct a range of ints using default step of 1 - - - - - - - Construct a range of ints specifying the step size - - - - - - - - Construct a range of longs - - - - - - - - Construct a range of doubles - - - - - - - - Construct a range of floats - - - - - - - - RepeatAttribute may be applied to test case in order - to run it multiple times. - - - - - Construct a RepeatAttribute - - The number of times to run the test - - - - RequiredAddinAttribute may be used to indicate the names of any addins - that must be present in order to run some or all of the tests in an - assembly. If the addin is not loaded, the entire assembly is marked - as NotRunnable. - - - - - Initializes a new instance of the class. - - The required addin. - - - - Gets the name of required addin. - - The required addin name. - - - - Summary description for SetCultureAttribute. - - - - - Construct given the name of a culture - - - - - - Summary description for SetUICultureAttribute. - - - - - Construct given the name of a culture - - - - - - SetUpAttribute is used in a TestFixture to identify a method - that is called immediately before each test is run. It is - also used in a SetUpFixture to identify the method that is - called once, before any of the subordinate tests are run. - - - - - Attribute used to mark a class that contains one-time SetUp - and/or TearDown methods that apply to all the tests in a - namespace or an assembly. - - - - - Attribute used to mark a static (shared in VB) property - that returns a list of tests. - - - - - Attribute used in a TestFixture to identify a method that is - called immediately after each test is run. It is also used - in a SetUpFixture to identify the method that is called once, - after all subordinate tests have run. In either case, the method - is guaranteed to be called, even if an exception is thrown. - - - - - Provide actions to execute before and after tests. - - - - - When implemented by an attribute, this interface implemented to provide actions to execute before and after tests. - - - - - Executed before each test is run - - Provides details about the test that is going to be run. - - - - Executed after each test is run - - Provides details about the test that has just been run. 
- - - - Provides the target for the action attribute - - The target for the action attribute - - - - Adding this attribute to a method within a - class makes the method callable from the NUnit test runner. There is a property - called Description which is optional which you can provide a more detailed test - description. This class cannot be inherited. - - - - [TestFixture] - public class Fixture - { - [Test] - public void MethodToTest() - {} - - [Test(Description = "more detailed description")] - publc void TestDescriptionMethod() - {} - } - - - - - - Descriptive text for this test - - - - - TestCaseAttribute is used to mark parameterized test cases - and provide them with their arguments. - - - - - Construct a TestCaseAttribute with a list of arguments. - This constructor is not CLS-Compliant - - - - - - Construct a TestCaseAttribute with a single argument - - - - - - Construct a TestCaseAttribute with a two arguments - - - - - - - Construct a TestCaseAttribute with a three arguments - - - - - - - - Gets the list of arguments to a test case - - - - - Gets or sets the expected result. Use - ExpectedResult by preference. - - The result. - - - - Gets or sets the expected result. - - The result. - - - - Gets a flag indicating whether an expected - result has been set. - - - - - Gets a list of categories associated with this test; - - - - - Gets or sets the category associated with this test. - May be a single category or a comma-separated list. - - - - - Gets or sets the expected exception. - - The expected exception. - - - - Gets or sets the name the expected exception. - - The expected name of the exception. - - - - Gets or sets the expected message of the expected exception - - The expected message of the exception. - - - - Gets or sets the type of match to be performed on the expected message - - - - - Gets or sets the description. - - The description. - - - - Gets or sets the name of the test. - - The name of the test. - - - - Gets or sets the ignored status of the test - - - - - Gets or sets the ignored status of the test - - - - - Gets or sets the explicit status of the test - - - - - Gets or sets the reason for not running the test - - - - - Gets or sets the reason for not running the test. - Set has the side effect of marking the test as ignored. - - The ignore reason. - - - - FactoryAttribute indicates the source to be used to - provide test cases for a test method. - - - - - Construct with the name of the data source, which must - be a property, field or method of the test class itself. - - An array of the names of the factories that will provide data - - - - Construct with a Type, which must implement IEnumerable - - The Type that will provide data - - - - Construct with a Type and name. - that don't support params arrays. - - The Type that will provide data - The name of the method, property or field that will provide data - - - - The name of a the method, property or fiend to be used as a source - - - - - A Type to be used as a source - - - - - Gets or sets the category associated with this test. - May be a single category or a comma-separated list. - - - - - [TestFixture] - public class ExampleClass - {} - - - - - Default constructor - - - - - Construct with a object[] representing a set of arguments. - In .NET 2.0, the arguments may later be separated into - type arguments and constructor arguments. - - - - - - Descriptive text for this fixture - - - - - Gets and sets the category for this fixture. - May be a comma-separated list of categories. 
[Generated XML documentation for the bundled NUnit framework assembly; the surrounding XML markup and diff markers were lost in extraction, leaving only the member summaries run together. The recoverable content documents:

- Test attributes: TestFixture categories, constructor arguments, Ignore/IgnoreReason and type arguments; TestFixtureSetUp and TestFixtureTearDown; Test (with optional Description); Timeout; RequiresSTA, RequiresMTA and RequiresThread; ValueSource.
- The Constraint base class: Matches overloads for values, delegates and references; WriteDescriptionTo, WriteActualValueTo and WriteMessageTo; the &, | and ! operator overloads; the And/Or/With chaining properties; and the After modifier returning a DelayedConstraint.
- Prefix, binary and collection constraints: AllItemsConstraint, SomeItemsConstraint, NoItemConstraint, ExactCountConstraint, AndConstraint, OrConstraint, NotConstraint, CollectionContainsConstraint, CollectionEquivalentConstraint, CollectionOrderedConstraint, CollectionSubsetConstraint, CollectionItemsEqualConstraint, CollectionTally, UniqueItemsConstraint and the Empty* constraints.
- Type and attribute constraints: AssignableFromConstraint, AssignableToConstraint, ExactTypeConstraint, InstanceOfTypeConstraint, ExceptionTypeConstraint, AttributeExistsConstraint and AttributeConstraint.
- Comparison and equality support: GreaterThan/GreaterThanOrEqual/LessThan/LessThanOrEqual, RangeConstraint, EqualConstraint with its Tolerance modifiers (Ulps, Percent, and TimeSpan units from Days down to Ticks), EqualityAdapter, ComparisonAdapter, NUnitComparer, NUnitEqualityComparer, Numerics, FloatingPointNumerics (ULP-based float/double comparison) and the MsgUtils message helpers.
- String and path constraints: StartsWithConstraint, EndsWithConstraint, SubstringConstraint, RegexConstraint, EmptyStringConstraint, NullEmptyStringConstraint, ContainsConstraint, SamePathConstraint, SamePathOrUnderConstraint and SubPathConstraint.
- Delegate, property and predicate constraints: ThrowsConstraint, ThrowsNothingConstraint, DelayedConstraint, PredicateConstraint, PropertyConstraint, PropertyExistsConstraint, SameAsConstraint, and the True, False, Null and NaN constraints.
- Expression plumbing: ConstraintBuilder with its OperatorStack and ConstraintStack, ConstraintExpression, ResolvableConstraintExpression, ReusableConstraint, and the ConstraintOperator hierarchy (prefix, binary, collection and attribute operators).]
It takes the arguments from the constraint - stack and pushes the resulting constraint on it. - - - - - Represents a constraint that succeeds if the specified - count of members of a collection match a base constraint. - - - - - Construct an ExactCountOperator for a specified count - - The expected count - - - - Returns a constraint that will apply the argument - to the members of a collection, succeeding if - none of them succeed. - - - - - Represents a constraint that succeeds if none of the - members of a collection match a base constraint. - - - - - Returns a constraint that will apply the argument - to the members of a collection, succeeding if - none of them succeed. - - - - - Negates the test of the constraint it wraps. - - - - - Constructs a new NotOperator - - - - - Returns a NotConstraint applied to its argument. - - - - - Operator that requires at least one of it's arguments to succeed - - - - - Construct an OrOperator - - - - - Apply the operator to produce an OrConstraint - - - - - Operator used to test for the presence of a named Property - on an object and optionally apply further tests to the - value of that property. - - - - - Constructs a PropOperator for a particular named property - - - - - Reduce produces a constraint from the operator and - any arguments. It takes the arguments from the constraint - stack and pushes the resulting constraint on it. - - - - - - Gets the name of the property to which the operator applies - - - - - Represents a constraint that succeeds if any of the - members of a collection match a base constraint. - - - - - Returns a constraint that will apply the argument - to the members of a collection, succeeding if - any of them succeed. - - - - - Operator that tests that an exception is thrown and - optionally applies further tests to the exception. - - - - - Construct a ThrowsOperator - - - - - Reduce produces a constraint from the operator and - any arguments. It takes the arguments from the constraint - stack and pushes the resulting constraint on it. - - - - - Represents a constraint that simply wraps the - constraint provided as an argument, without any - further functionality, but which modifes the - order of evaluation because of its precedence. - - - - - Constructor for the WithOperator - - - - - Returns a constraint that wraps its argument - - - - - Thrown when an assertion failed. - - - - The error message that explains - the reason for the exception - - - The error message that explains - the reason for the exception - The exception that caused the - current exception - - - - Serialization Constructor - - - - - Thrown when an assertion failed. - - - - - - - The error message that explains - the reason for the exception - The exception that caused the - current exception - - - - Serialization Constructor - - - - - Thrown when a test executes inconclusively. - - - - The error message that explains - the reason for the exception - - - The error message that explains - the reason for the exception - The exception that caused the - current exception - - - - Serialization Constructor - - - - - Thrown when an assertion failed. 
- - - - - - - The error message that explains - the reason for the exception - The exception that caused the - current exception - - - - Serialization Constructor - - - - - - - - - - - Compares two objects of a given Type for equality within a tolerance - - The first object to compare - The second object to compare - The tolerance to use in the comparison - - - - diff --git a/src/packages/NUnit.2.6.3/license.txt b/src/packages/NUnit.2.6.3/license.txt deleted file mode 100644 index b12903af..00000000 --- a/src/packages/NUnit.2.6.3/license.txt +++ /dev/null @@ -1,15 +0,0 @@ -Copyright � 2002-2013 Charlie Poole -Copyright � 2002-2004 James W. Newkirk, Michael C. Two, Alexei A. Vorontsov -Copyright � 2000-2002 Philip A. Craig - -This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. - -Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment (see the following) in the product documentation is required. - -Portions Copyright � 2002-2013 Charlie Poole or Copyright � 2002-2004 James W. Newkirk, Michael C. Two, Alexei A. Vorontsov or Copyright � 2000-2002 Philip A. Craig - -2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. - -3. This notice may not be removed or altered from any source distribution. diff --git a/src/packages/repositories.config b/src/packages/repositories.config deleted file mode 100644 index 2390a9ac..00000000 --- a/src/packages/repositories.config +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file diff --git a/src/sentinel/redis-6380/redis.conf b/src/sentinel/redis-6380/redis.conf new file mode 100644 index 00000000..faf4eaf4 --- /dev/null +++ b/src/sentinel/redis-6380/redis.conf @@ -0,0 +1,5 @@ +# Relative to ./src/sentinel +include redis.conf + +port 6380 +dir ./redis-6380/state diff --git a/src/sentinel/redis-6380/redis.windows.conf b/src/sentinel/redis-6380/redis.windows.conf index 91324109..c187d09b 100644 --- a/src/sentinel/redis-6380/redis.windows.conf +++ b/src/sentinel/redis-6380/redis.windows.conf @@ -34,7 +34,7 @@ # On Windows, daemonize and pidfile are not supported. # However, you can run redis as a Windows service, and specify a logfile. -# The logfile will contain the pid. +# The logfile will contain the pid. # Accept connections on the specified port, default is 6379. # If port 0 is specified Redis will not listen on a TCP socket. @@ -96,12 +96,12 @@ tcp-keepalive 0 loglevel verbose # Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. +# Redis to log on the standard output. logfile "" -# To enable logging to the Windows EventLog, just set 'syslog-enabled' to +# To enable logging to the Windows EventLog, just set 'syslog-enabled' to # yes, and optionally update the other syslog parameters to suit your needs. -# If Redis is installed and launched as a Windows Service, this will +# If Redis is installed and launched as a Windows Service, this will # automatically be enabled. 
# syslog-enabled no @@ -176,9 +176,9 @@ dbfilename "dump.rdb" # # The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. -# +# # The Append Only File will also be created inside this directory. -# +# # Note that you must specify a directory here, not a file name. dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380" @@ -379,7 +379,7 @@ slave-priority 100 # # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). -# +# # Warning: since Redis is pretty fast an outside user can try up to # 150k passwords per second against a good box. This means that you should # use a very strong password otherwise it will be very easy to break. @@ -418,63 +418,14 @@ slave-priority 100 # # maxclients 10000 -# The Linux version of Redis relies on the system call fork() to perform -# point-in-time snapshots of the heap. In addition to the AOF and RDB backup -# mechanism, the master-slave synchronization and clustering features are -# dependent on this behavior of fork(). In order for the Windows version to -# perform like the Linux version we had to simulate this aspect of fork(). -# Doing so meant moving the Redis heap into a memory mapped file that can -# be shared with a child process. -# -# *** There must be disk space available for this file in order for Redis -# to launch. *** The default configuration places this file in the local -# appdata directory. If you wish to move this file to another local disk, -# use the heapdir flag as described below. -# -# The maxheap flag controls the maximum size of this memory mapped file, -# as well as the total usable space for the Redis heap. Running Redis -# without either maxheap or maxmemory will result in a memory mapped file -# being created that is equal to the size of physical memory. During -# fork() operations the total page file commit will max out at around: -# -# (size of physical memory) + (2 * size of maxheap) -# -# For instance, on a machine with 8GB of physical RAM, the max page file -# commit with the default maxheap size will be (8)+(2*8) GB , or 24GB. The -# default page file sizing of Windows will allow for this without having -# to reconfigure the system. Larger heap sizes are possible, but the maximum -# page file size will have to be increased accordingly. -# -# The Redis heap must be larger than the value specified by the maxmemory -# flag, as the heap allocator has its own memory requirements and -# fragmentation of the heap is inevitable. If only the maxmemory flag is -# specified, maxheap will be set at 1.5*maxmemory. If the maxheap flag is -# specified along with maxmemory, the maxheap flag will be automatically -# increased if it is smaller than 1.5*maxmemory. -# -# maxheap -maxheap 1gb - -# The heap memory mapped file must reside on a local path for heap sharing -# between processes to work. A UNC path will not suffice here. For maximum -# performance this should be located on the fastest local drive available. -# This value defaults to the local application data folder(e.g., -# "%USERPROFILE%\AppData\Local"). Since this file can be very large, you -# may wish to place this on a drive other than the one the operating system -# is installed on. -# -# Note that you must specify a directory here, not a file name. 
-# heapdir -heapdir C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380 - # If Redis is to be used as an in-memory-only cache without any kind of # persistence, then the fork() mechanism used by the background AOF/RDB -# persistence is unneccessary. As an optimization, all persistence can be -# turned off in the Windows version of Redis. This will disable the creation of -# the memory mapped heap file, redirect heap allocations to the system heap -# allocator, and disable commands that would otherwise cause fork() operations: -# BGSAVE and BGREWRITEAOF. This flag may not be combined with any of the other -# flags that configure AOF and RDB operations. +# persistence is unnecessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will redirect heap +# allocations to the system heap allocator, and disable commands that would +# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF. +# This flag may not be combined with any of the other flags that configure +# AOF and RDB operations. # persistence-available [(yes)|no] # Don't use more memory than the specified amount of bytes. @@ -500,18 +451,33 @@ heapdir C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380 # limit for maxmemory so that there is some free RAM on the system for slave # output buffers (but this is not needed if the policy is 'noeviction'). # +# WARNING: not setting maxmemory will cause Redis to terminate with an +# out-of-memory exception if the heap limit is reached. +# +# NOTE: since Redis uses the system paging file to allocate the heap memory, +# the Working Set memory usage showed by the Windows Task Manager or by other +# tools such as ProcessExplorer will not always be accurate. For example, right +# after a background save of the RDB or the AOF files, the working set value +# may drop significantly. In order to check the correct amount of memory used +# by the redis-server to store the data, use the INFO client command. The INFO +# command shows only the memory used to store the redis data, not the extra +# memory used by the Windows process for its own requirements. Th3 extra amount +# of memory not reported by the INFO command can be calculated subtracting the +# Peak Working Set reported by the Windows Task Manager and the used_memory_peak +# reported by the INFO command. +# # maxmemory # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory # is reached. You can select among five behaviors: -# +# # volatile-lru -> remove the key with an expire set using an LRU algorithm # allkeys-lru -> remove any key according to the LRU algorithm # volatile-random -> remove a random key with an expire set # allkeys-random -> remove a random key, any key # volatile-ttl -> remove the key with the nearest expire time (minor TTL) # noeviction -> don't expire at all, just return an error on write operations -# +# # Note: with any of the above policies, Redis will return an error on write # operations, when there are no suitable keys for eviction. # @@ -600,7 +566,7 @@ appendfsync everysec # the same as "appendfsync none". In practical terms, this means that it is # possible to lose up to 30 seconds of log in the worst scenario (with the # default Linux settings). -# +# # If you have latency problems turn this to "yes". Otherwise leave it as # "no" that is the safest pick from the point of view of durability. no-appendfsync-on-rewrite no @@ -608,7 +574,7 @@ no-appendfsync-on-rewrite no # Automatic rewrite of the append only file. 
# Redis is able to automatically rewrite the log file implicitly calling # BGREWRITEAOF when the AOF log size grows by the specified percentage. -# +# # This is how it works: Redis remembers the size of the AOF file after the # latest rewrite (if no rewrite has happened since the restart, the size of # the AOF at startup is used). @@ -667,6 +633,114 @@ aof-load-truncated yes # Set it to 0 or a negative value for unlimited execution without warnings. lua-time-limit 5000 +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. 
+# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + ################################## SLOW LOG ################################### # The Redis Slow Log is a system to log queries that exceeded a specified @@ -675,7 +749,7 @@ lua-time-limit 5000 # but just the time needed to actually execute the command (this is the only # stage of command execution where the thread is blocked and can not serve # other requests in the meantime). -# +# # You can configure the slow log with two parameters: one tells Redis # what is the execution time, in microseconds, to exceed in order for the # command to get logged, and the other parameter is the length of the @@ -806,7 +880,7 @@ hll-sparse-max-bytes 3000 # that is rehashing, the more rehashing "steps" are performed, so if the # server is idle the rehashing is never complete and some more memory is used # by the hash table. -# +# # The default is to use this millisecond 10 times every second in order to # actively rehash the main dictionaries, freeing memory when possible. 
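(Illustrative aside, not part of the diff: the maxmemory note added above recommends the INFO command for checking the memory actually used by redis-server on Windows. Below is a minimal C# sketch of reading those figures with ServiceStack.Redis, assuming a local instance started from this redis-6380 config and that the client exposes the parsed INFO reply as an Info dictionary.)

using System;
using ServiceStack.Redis;

class RedisMemoryInfoSketch
{
    static void Main()
    {
        // Hypothetical local instance from src/sentinel/redis-6380 (port 6380).
        using (var redis = new RedisClient("127.0.0.1", 6380))
        {
            // Parsed output of the INFO command; per the note above, this reports
            // only the memory used to store Redis data, not the extra memory the
            // Windows process needs for itself.
            var info = redis.Info;
            Console.WriteLine("used_memory: " + info["used_memory"]);
            Console.WriteLine("used_memory_peak: " + info["used_memory_peak"]);
        }
    }
}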
# diff --git a/src/sentinel/redis-6380/sentinel.conf b/src/sentinel/redis-6380/sentinel.conf index 6d743a88..561fdb58 100644 --- a/src/sentinel/redis-6380/sentinel.conf +++ b/src/sentinel/redis-6380/sentinel.conf @@ -1,180 +1,7 @@ -# Redis1 sentinel.conf +# Relative to ./src/sentinel -# port -# The port that this sentinel instance will run on port 26380 - -# sentinel announce-ip -# sentinel announce-port -# -# The above two configuration directives are useful in environments where, -# because of NAT, Sentinel is reachable from outside via a non-local address. -# -# When announce-ip is provided, the Sentinel will claim the specified IP address -# in HELLO messages used to gossip its presence, instead of auto-detecting the -# local address as it usually does. -# -# Similarly when announce-port is provided and is valid and non-zero, Sentinel -# will announce the specified TCP port. -# -# The two options don't need to be used together, if only announce-ip is -# provided, the Sentinel will announce the specified IP and the server port -# as specified by the "port" option. If only announce-port is provided, the -# Sentinel will announce the auto-detected local IP and the specified port. -# -# Example: -# -# sentinel announce-ip 1.2.3.4 - -# dir -# Every long running process should have a well-defined working directory. -# For Redis Sentinel to chdir to /tmp at startup is the simplest thing -# for the process to don't interfere with administrative tasks such as -# unmounting filesystems. -dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380" - -# sentinel monitor -# -# Tells Sentinel to monitor this master, and to consider it in O_DOWN -# (Objectively Down) state only if at least sentinels agree. -# -# Note that whatever is the ODOWN quorum, a Sentinel will require to -# be elected by the majority of the known Sentinels in order to -# start a failover, so no failover can be performed in minority. -# -# Slaves are auto-discovered, so you don't need to specify slaves in -# any way. Sentinel itself will rewrite this configuration file adding -# the slaves using additional configuration options. -# Also note that the configuration file is rewritten when a -# slave is promoted to master. -# -# Note: master name should not include special characters or spaces. -# The valid charset is A-z 0-9 and the three characters ".-_". +dir ./redis-6380/state sentinel monitor mymaster 127.0.0.1 6380 2 +protected-mode no -# sentinel auth-pass -# -# Set the password to use to authenticate with the master and slaves. -# Useful if there is a password set in the Redis instances to monitor. -# -# Note that the master password is also used for slaves, so it is not -# possible to set a different password in masters and slaves instances -# if you want to be able to monitor these instances with Sentinel. -# -# However you can have Redis instances without the authentication enabled -# mixed with Redis instances requiring the authentication (as long as the -# password set is the same for all the instances requiring the password) as -# the AUTH command will have no effect in Redis instances with authentication -# switched off. -# -# Example: -# -# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd - -# sentinel down-after-milliseconds -# -# Number of milliseconds the master (or any attached slave or sentinel) should -# be unreachable (as in, not acceptable reply to PING, continuously, for the -# specified period) in order to consider it in S_DOWN state (Subjectively -# Down). -# -# Default is 30 seconds. 
-sentinel config-epoch mymaster 4 - -# sentinel parallel-syncs -# -# How many slaves we can reconfigure to point to the new slave simultaneously -# during the failover. Use a low number if you use the slaves to serve query -# to avoid that all the slaves will be unreachable at about the same -# time while performing the synchronization with the master. -sentinel leader-epoch mymaster 4 - -# sentinel failover-timeout -# -# Specifies the failover timeout in milliseconds. It is used in many ways: -# -# - The time needed to re-start a failover after a previous failover was -# already tried against the same master by a given Sentinel, is two -# times the failover timeout. -# -# - The time needed for a slave replicating to a wrong master according -# to a Sentinel current configuration, to be forced to replicate -# with the right master, is exactly the failover timeout (counting since -# the moment a Sentinel detected the misconfiguration). -# -# - The time needed to cancel a failover that is already in progress but -# did not produced any configuration change (SLAVEOF NO ONE yet not -# acknowledged by the promoted slave). -# -# - The maximum time a failover in progress waits for all the slaves to be -# reconfigured as slaves of the new master. However even after this time -# the slaves will be reconfigured by the Sentinels anyway, but not with -# the exact parallel-syncs progression as specified. -# -# Default is 3 minutes. -sentinel known-slave mymaster 127.0.0.1 6381 -sentinel known-slave mymaster 127.0.0.1 6382 - -# SCRIPTS EXECUTION -# -# sentinel notification-script and sentinel reconfig-script are used in order -# to configure scripts that are called to notify the system administrator -# or to reconfigure clients after a failover. The scripts are executed -# with the following rules for error handling: -# -# If script exits with "1" the execution is retried later (up to a maximum -# number of times currently set to 10). -# -# If script exits with "2" (or an higher value) the script execution is -# not retried. -# -# If script terminates because it receives a signal the behavior is the same -# as exit code 1. -# -# A script has a maximum running time of 60 seconds. After this limit is -# reached the script is terminated with a SIGKILL and the execution retried. - -# NOTIFICATION SCRIPT -# -# sentinel notification-script -# -# Call the specified notification script for any sentinel event that is -# generated in the WARNING level (for instance -sdown, -odown, and so forth). -# This script should notify the system administrator via email, SMS, or any -# other messaging system, that there is something wrong with the monitored -# Redis systems. -# -# The script is called with just two arguments: the first is the event type -# and the second the event description. -# -# The script must exist and be executable in order for sentinel to start if -# this option is provided. -# -# Example: -# -# sentinel notification-script mymaster /var/redis/notify.sh - -# CLIENTS RECONFIGURATION SCRIPT -# -# sentinel client-reconfig-script -# -# When the master changed because of a failover a script can be called in -# order to perform application-specific tasks to notify the clients that the -# configuration has changed and the master is at a different address. 
-# -# The following arguments are passed to the script: -# -# -# -# is currently always "failover" -# is either "leader" or "observer" -# -# The arguments from-ip, from-port, to-ip, to-port are used to communicate -# the old address of the master and the new address of the elected slave -# (now a master). -# -# This script should be resistant to multiple invocations. -# -# Example: -# -# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis-6380/sentinel.windows.conf b/src/sentinel/redis-6380/sentinel.windows.conf new file mode 100644 index 00000000..cd89e805 --- /dev/null +++ b/src/sentinel/redis-6380/sentinel.windows.conf @@ -0,0 +1,181 @@ +# Redis1 sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26380 +bind 127.0.0.1 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. +# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6380" + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6380 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. 
+# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. +# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel config-epoch mymaster 4 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel leader-epoch mymaster 4 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel known-slave mymaster 127.0.0.1 6381 +sentinel known-slave mymaster 127.0.0.1 6382 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. 
+# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. +# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. +# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis-6381/redis.conf b/src/sentinel/redis-6381/redis.conf new file mode 100644 index 00000000..0d21c604 --- /dev/null +++ b/src/sentinel/redis-6381/redis.conf @@ -0,0 +1,6 @@ +# Relative to ./src/sentinel +include redis.conf + +port 6381 +dir ./redis-6381/state +slaveof 127.0.0.1 6380 diff --git a/src/sentinel/redis-6381/redis.windows.conf b/src/sentinel/redis-6381/redis.windows.conf index 13e49137..70b73a7e 100644 --- a/src/sentinel/redis-6381/redis.windows.conf +++ b/src/sentinel/redis-6381/redis.windows.conf @@ -34,7 +34,7 @@ # On Windows, daemonize and pidfile are not supported. # However, you can run redis as a Windows service, and specify a logfile. -# The logfile will contain the pid. +# The logfile will contain the pid. # Accept connections on the specified port, default is 6379. # If port 0 is specified Redis will not listen on a TCP socket. @@ -96,12 +96,12 @@ tcp-keepalive 0 loglevel verbose # Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. +# Redis to log on the standard output. logfile "" -# To enable logging to the Windows EventLog, just set 'syslog-enabled' to +# To enable logging to the Windows EventLog, just set 'syslog-enabled' to # yes, and optionally update the other syslog parameters to suit your needs. -# If Redis is installed and launched as a Windows Service, this will +# If Redis is installed and launched as a Windows Service, this will # automatically be enabled. # syslog-enabled no @@ -176,9 +176,9 @@ dbfilename "dump.rdb" # # The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. -# +# # The Append Only File will also be created inside this directory. -# +# # Note that you must specify a directory here, not a file name. dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381" @@ -379,7 +379,7 @@ slave-priority 100 # # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). -# +# # Warning: since Redis is pretty fast an outside user can try up to # 150k passwords per second against a good box. This means that you should # use a very strong password otherwise it will be very easy to break. @@ -418,63 +418,14 @@ slave-priority 100 # # maxclients 10000 -# The Linux version of Redis relies on the system call fork() to perform -# point-in-time snapshots of the heap. 
In addition to the AOF and RDB backup -# mechanism, the master-slave synchronization and clustering features are -# dependent on this behavior of fork(). In order for the Windows version to -# perform like the Linux version we had to simulate this aspect of fork(). -# Doing so meant moving the Redis heap into a memory mapped file that can -# be shared with a child process. -# -# *** There must be disk space available for this file in order for Redis -# to launch. *** The default configuration places this file in the local -# appdata directory. If you wish to move this file to another local disk, -# use the heapdir flag as described below. -# -# The maxheap flag controls the maximum size of this memory mapped file, -# as well as the total usable space for the Redis heap. Running Redis -# without either maxheap or maxmemory will result in a memory mapped file -# being created that is equal to the size of physical memory. During -# fork() operations the total page file commit will max out at around: -# -# (size of physical memory) + (2 * size of maxheap) -# -# For instance, on a machine with 8GB of physical RAM, the max page file -# commit with the default maxheap size will be (8)+(2*8) GB , or 24GB. The -# default page file sizing of Windows will allow for this without having -# to reconfigure the system. Larger heap sizes are possible, but the maximum -# page file size will have to be increased accordingly. -# -# The Redis heap must be larger than the value specified by the maxmemory -# flag, as the heap allocator has its own memory requirements and -# fragmentation of the heap is inevitable. If only the maxmemory flag is -# specified, maxheap will be set at 1.5*maxmemory. If the maxheap flag is -# specified along with maxmemory, the maxheap flag will be automatically -# increased if it is smaller than 1.5*maxmemory. -# -# maxheap -maxheap 1gb - -# The heap memory mapped file must reside on a local path for heap sharing -# between processes to work. A UNC path will not suffice here. For maximum -# performance this should be located on the fastest local drive available. -# This value defaults to the local application data folder(e.g., -# "%USERPROFILE%\AppData\Local"). Since this file can be very large, you -# may wish to place this on a drive other than the one the operating system -# is installed on. -# -# Note that you must specify a directory here, not a file name. -# heapdir -heapdir C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381 - # If Redis is to be used as an in-memory-only cache without any kind of # persistence, then the fork() mechanism used by the background AOF/RDB -# persistence is unneccessary. As an optimization, all persistence can be -# turned off in the Windows version of Redis. This will disable the creation of -# the memory mapped heap file, redirect heap allocations to the system heap -# allocator, and disable commands that would otherwise cause fork() operations: -# BGSAVE and BGREWRITEAOF. This flag may not be combined with any of the other -# flags that configure AOF and RDB operations. +# persistence is unnecessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will redirect heap +# allocations to the system heap allocator, and disable commands that would +# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF. +# This flag may not be combined with any of the other flags that configure +# AOF and RDB operations. 
# persistence-available [(yes)|no] # Don't use more memory than the specified amount of bytes. @@ -500,18 +451,33 @@ heapdir C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381 # limit for maxmemory so that there is some free RAM on the system for slave # output buffers (but this is not needed if the policy is 'noeviction'). # +# WARNING: not setting maxmemory will cause Redis to terminate with an +# out-of-memory exception if the heap limit is reached. +# +# NOTE: since Redis uses the system paging file to allocate the heap memory, +# the Working Set memory usage showed by the Windows Task Manager or by other +# tools such as ProcessExplorer will not always be accurate. For example, right +# after a background save of the RDB or the AOF files, the working set value +# may drop significantly. In order to check the correct amount of memory used +# by the redis-server to store the data, use the INFO client command. The INFO +# command shows only the memory used to store the redis data, not the extra +# memory used by the Windows process for its own requirements. Th3 extra amount +# of memory not reported by the INFO command can be calculated subtracting the +# Peak Working Set reported by the Windows Task Manager and the used_memory_peak +# reported by the INFO command. +# # maxmemory # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory # is reached. You can select among five behaviors: -# +# # volatile-lru -> remove the key with an expire set using an LRU algorithm # allkeys-lru -> remove any key according to the LRU algorithm # volatile-random -> remove a random key with an expire set # allkeys-random -> remove a random key, any key # volatile-ttl -> remove the key with the nearest expire time (minor TTL) # noeviction -> don't expire at all, just return an error on write operations -# +# # Note: with any of the above policies, Redis will return an error on write # operations, when there are no suitable keys for eviction. # @@ -600,7 +566,7 @@ appendfsync everysec # the same as "appendfsync none". In practical terms, this means that it is # possible to lose up to 30 seconds of log in the worst scenario (with the # default Linux settings). -# +# # If you have latency problems turn this to "yes". Otherwise leave it as # "no" that is the safest pick from the point of view of durability. no-appendfsync-on-rewrite no @@ -608,7 +574,7 @@ no-appendfsync-on-rewrite no # Automatic rewrite of the append only file. # Redis is able to automatically rewrite the log file implicitly calling # BGREWRITEAOF when the AOF log size grows by the specified percentage. -# +# # This is how it works: Redis remembers the size of the AOF file after the # latest rewrite (if no rewrite has happened since the restart, the size of # the AOF at startup is used). @@ -667,6 +633,114 @@ aof-load-truncated yes # Set it to 0 or a negative value for unlimited execution without warnings. lua-time-limit 5000 +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. 
In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. 
It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + ################################## SLOW LOG ################################### # The Redis Slow Log is a system to log queries that exceeded a specified @@ -675,7 +749,7 @@ lua-time-limit 5000 # but just the time needed to actually execute the command (this is the only # stage of command execution where the thread is blocked and can not serve # other requests in the meantime). -# +# # You can configure the slow log with two parameters: one tells Redis # what is the execution time, in microseconds, to exceed in order for the # command to get logged, and the other parameter is the length of the @@ -806,7 +880,7 @@ hll-sparse-max-bytes 3000 # that is rehashing, the more rehashing "steps" are performed, so if the # server is idle the rehashing is never complete and some more memory is used # by the hash table. -# +# # The default is to use this millisecond 10 times every second in order to # actively rehash the main dictionaries, freeing memory when possible. # diff --git a/src/sentinel/redis-6381/sentinel.conf b/src/sentinel/redis-6381/sentinel.conf index a0b77a3b..1b730fbf 100644 --- a/src/sentinel/redis-6381/sentinel.conf +++ b/src/sentinel/redis-6381/sentinel.conf @@ -1,180 +1,7 @@ -# Redis1 sentinel.conf +# Relative to ./src/sentinel -# port -# The port that this sentinel instance will run on port 26381 - -# sentinel announce-ip -# sentinel announce-port -# -# The above two configuration directives are useful in environments where, -# because of NAT, Sentinel is reachable from outside via a non-local address. -# -# When announce-ip is provided, the Sentinel will claim the specified IP address -# in HELLO messages used to gossip its presence, instead of auto-detecting the -# local address as it usually does. -# -# Similarly when announce-port is provided and is valid and non-zero, Sentinel -# will announce the specified TCP port. -# -# The two options don't need to be used together, if only announce-ip is -# provided, the Sentinel will announce the specified IP and the server port -# as specified by the "port" option. If only announce-port is provided, the -# Sentinel will announce the auto-detected local IP and the specified port. -# -# Example: -# -# sentinel announce-ip 1.2.3.4 - -# dir -# Every long running process should have a well-defined working directory. 
-# For Redis Sentinel to chdir to /tmp at startup is the simplest thing -# for the process to don't interfere with administrative tasks such as -# unmounting filesystems. -dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381" - -# sentinel monitor -# -# Tells Sentinel to monitor this master, and to consider it in O_DOWN -# (Objectively Down) state only if at least sentinels agree. -# -# Note that whatever is the ODOWN quorum, a Sentinel will require to -# be elected by the majority of the known Sentinels in order to -# start a failover, so no failover can be performed in minority. -# -# Slaves are auto-discovered, so you don't need to specify slaves in -# any way. Sentinel itself will rewrite this configuration file adding -# the slaves using additional configuration options. -# Also note that the configuration file is rewritten when a -# slave is promoted to master. -# -# Note: master name should not include special characters or spaces. -# The valid charset is A-z 0-9 and the three characters ".-_". +dir ./redis-6381/state sentinel monitor mymaster 127.0.0.1 6380 2 +protected-mode no -# sentinel auth-pass -# -# Set the password to use to authenticate with the master and slaves. -# Useful if there is a password set in the Redis instances to monitor. -# -# Note that the master password is also used for slaves, so it is not -# possible to set a different password in masters and slaves instances -# if you want to be able to monitor these instances with Sentinel. -# -# However you can have Redis instances without the authentication enabled -# mixed with Redis instances requiring the authentication (as long as the -# password set is the same for all the instances requiring the password) as -# the AUTH command will have no effect in Redis instances with authentication -# switched off. -# -# Example: -# -# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd - -# sentinel down-after-milliseconds -# -# Number of milliseconds the master (or any attached slave or sentinel) should -# be unreachable (as in, not acceptable reply to PING, continuously, for the -# specified period) in order to consider it in S_DOWN state (Subjectively -# Down). -# -# Default is 30 seconds. -sentinel config-epoch mymaster 4 - -# sentinel parallel-syncs -# -# How many slaves we can reconfigure to point to the new slave simultaneously -# during the failover. Use a low number if you use the slaves to serve query -# to avoid that all the slaves will be unreachable at about the same -# time while performing the synchronization with the master. -sentinel leader-epoch mymaster 4 - -# sentinel failover-timeout -# -# Specifies the failover timeout in milliseconds. It is used in many ways: -# -# - The time needed to re-start a failover after a previous failover was -# already tried against the same master by a given Sentinel, is two -# times the failover timeout. -# -# - The time needed for a slave replicating to a wrong master according -# to a Sentinel current configuration, to be forced to replicate -# with the right master, is exactly the failover timeout (counting since -# the moment a Sentinel detected the misconfiguration). -# -# - The time needed to cancel a failover that is already in progress but -# did not produced any configuration change (SLAVEOF NO ONE yet not -# acknowledged by the promoted slave). -# -# - The maximum time a failover in progress waits for all the slaves to be -# reconfigured as slaves of the new master. 
However even after this time -# the slaves will be reconfigured by the Sentinels anyway, but not with -# the exact parallel-syncs progression as specified. -# -# Default is 3 minutes. -sentinel known-slave mymaster 127.0.0.1 6381 -sentinel known-slave mymaster 127.0.0.1 6382 - -# SCRIPTS EXECUTION -# -# sentinel notification-script and sentinel reconfig-script are used in order -# to configure scripts that are called to notify the system administrator -# or to reconfigure clients after a failover. The scripts are executed -# with the following rules for error handling: -# -# If script exits with "1" the execution is retried later (up to a maximum -# number of times currently set to 10). -# -# If script exits with "2" (or an higher value) the script execution is -# not retried. -# -# If script terminates because it receives a signal the behavior is the same -# as exit code 1. -# -# A script has a maximum running time of 60 seconds. After this limit is -# reached the script is terminated with a SIGKILL and the execution retried. - -# NOTIFICATION SCRIPT -# -# sentinel notification-script -# -# Call the specified notification script for any sentinel event that is -# generated in the WARNING level (for instance -sdown, -odown, and so forth). -# This script should notify the system administrator via email, SMS, or any -# other messaging system, that there is something wrong with the monitored -# Redis systems. -# -# The script is called with just two arguments: the first is the event type -# and the second the event description. -# -# The script must exist and be executable in order for sentinel to start if -# this option is provided. -# -# Example: -# -# sentinel notification-script mymaster /var/redis/notify.sh - -# CLIENTS RECONFIGURATION SCRIPT -# -# sentinel client-reconfig-script -# -# When the master changed because of a failover a script can be called in -# order to perform application-specific tasks to notify the clients that the -# configuration has changed and the master is at a different address. -# -# The following arguments are passed to the script: -# -# -# -# is currently always "failover" -# is either "leader" or "observer" -# -# The arguments from-ip, from-port, to-ip, to-port are used to communicate -# the old address of the master and the new address of the elected slave -# (now a master). -# -# This script should be resistant to multiple invocations. -# -# Example: -# -# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis-6381/sentinel.windows.conf b/src/sentinel/redis-6381/sentinel.windows.conf new file mode 100644 index 00000000..c85c7d09 --- /dev/null +++ b/src/sentinel/redis-6381/sentinel.windows.conf @@ -0,0 +1,181 @@ +# Redis1 sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26381 +bind 127.0.0.1 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. +# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. 
+# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6381" + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6380 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. +# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. +# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel config-epoch mymaster 4 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel leader-epoch mymaster 4 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. 
+# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel known-slave mymaster 127.0.0.1 6381 +sentinel known-slave mymaster 127.0.0.1 6382 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. +# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. +# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. 
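On the client side, ServiceStack.Redis can react to the same failover events these notification/reconfig scripts cover. A minimal sketch assuming the RedisSentinel callbacks exposed by this library (OnFailover, OnSentinelMessageReceived, OnWorkerError) and a third sentinel on 26380; verify the hooks against the version in use.

```csharp
using System;
using ServiceStack.Redis;

class SentinelEventHooks
{
    static void Main()
    {
        // Sentinels from this diff (26381/26382) plus an assumed 26380 instance
        var sentinelHosts = new[] { "127.0.0.1:26380", "127.0.0.1:26381", "127.0.0.1:26382" };

        var sentinel = new RedisSentinel(sentinelHosts, "mymaster")
        {
            // Invoked after Sentinel promotes a new master and the client manager is rewired
            OnFailover = manager => Console.WriteLine("mymaster failed over"),
            // Raw Sentinel pub/sub events (+sdown, +odown, +switch-master, ...)
            OnSentinelMessageReceived = (channel, msg) => Console.WriteLine($"{channel}: {msg}"),
            OnWorkerError = ex => Console.WriteLine($"Sentinel worker error: {ex.Message}"),
        };

        using var redisManager = sentinel.Start(); // resolves the current master via Sentinel
        using var redis = redisManager.GetClient();
        redis.SetValue("sentinel:demo", DateTime.UtcNow.ToString("o"));
    }
}
```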
+# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis-6382/redis.conf b/src/sentinel/redis-6382/redis.conf new file mode 100644 index 00000000..2ddf17cc --- /dev/null +++ b/src/sentinel/redis-6382/redis.conf @@ -0,0 +1,6 @@ +# Relative to ./src/sentinel +include redis.conf + +port 6382 +dir ./redis-6382/state +slaveof 127.0.0.1 6380 diff --git a/src/sentinel/redis-6382/redis.windows.conf b/src/sentinel/redis-6382/redis.windows.conf index 1764dfe3..3c857473 100644 --- a/src/sentinel/redis-6382/redis.windows.conf +++ b/src/sentinel/redis-6382/redis.windows.conf @@ -34,7 +34,7 @@ # On Windows, daemonize and pidfile are not supported. # However, you can run redis as a Windows service, and specify a logfile. -# The logfile will contain the pid. +# The logfile will contain the pid. # Accept connections on the specified port, default is 6379. # If port 0 is specified Redis will not listen on a TCP socket. @@ -96,12 +96,12 @@ tcp-keepalive 0 loglevel verbose # Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. +# Redis to log on the standard output. logfile "" -# To enable logging to the Windows EventLog, just set 'syslog-enabled' to +# To enable logging to the Windows EventLog, just set 'syslog-enabled' to # yes, and optionally update the other syslog parameters to suit your needs. -# If Redis is installed and launched as a Windows Service, this will +# If Redis is installed and launched as a Windows Service, this will # automatically be enabled. # syslog-enabled no @@ -176,9 +176,9 @@ dbfilename "dump.rdb" # # The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. -# +# # The Append Only File will also be created inside this directory. -# +# # Note that you must specify a directory here, not a file name. dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382" @@ -379,7 +379,7 @@ slave-priority 100 # # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). -# +# # Warning: since Redis is pretty fast an outside user can try up to # 150k passwords per second against a good box. This means that you should # use a very strong password otherwise it will be very easy to break. @@ -418,63 +418,14 @@ slave-priority 100 # # maxclients 10000 -# The Linux version of Redis relies on the system call fork() to perform -# point-in-time snapshots of the heap. In addition to the AOF and RDB backup -# mechanism, the master-slave synchronization and clustering features are -# dependent on this behavior of fork(). In order for the Windows version to -# perform like the Linux version we had to simulate this aspect of fork(). -# Doing so meant moving the Redis heap into a memory mapped file that can -# be shared with a child process. -# -# *** There must be disk space available for this file in order for Redis -# to launch. *** The default configuration places this file in the local -# appdata directory. If you wish to move this file to another local disk, -# use the heapdir flag as described below. -# -# The maxheap flag controls the maximum size of this memory mapped file, -# as well as the total usable space for the Redis heap. Running Redis -# without either maxheap or maxmemory will result in a memory mapped file -# being created that is equal to the size of physical memory. 
During -# fork() operations the total page file commit will max out at around: -# -# (size of physical memory) + (2 * size of maxheap) -# -# For instance, on a machine with 8GB of physical RAM, the max page file -# commit with the default maxheap size will be (8)+(2*8) GB , or 24GB. The -# default page file sizing of Windows will allow for this without having -# to reconfigure the system. Larger heap sizes are possible, but the maximum -# page file size will have to be increased accordingly. -# -# The Redis heap must be larger than the value specified by the maxmemory -# flag, as the heap allocator has its own memory requirements and -# fragmentation of the heap is inevitable. If only the maxmemory flag is -# specified, maxheap will be set at 1.5*maxmemory. If the maxheap flag is -# specified along with maxmemory, the maxheap flag will be automatically -# increased if it is smaller than 1.5*maxmemory. -# -# maxheap -maxheap 1gb - -# The heap memory mapped file must reside on a local path for heap sharing -# between processes to work. A UNC path will not suffice here. For maximum -# performance this should be located on the fastest local drive available. -# This value defaults to the local application data folder(e.g., -# "%USERPROFILE%\AppData\Local"). Since this file can be very large, you -# may wish to place this on a drive other than the one the operating system -# is installed on. -# -# Note that you must specify a directory here, not a file name. -# heapdir -heapdir C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382 - # If Redis is to be used as an in-memory-only cache without any kind of # persistence, then the fork() mechanism used by the background AOF/RDB -# persistence is unneccessary. As an optimization, all persistence can be -# turned off in the Windows version of Redis. This will disable the creation of -# the memory mapped heap file, redirect heap allocations to the system heap -# allocator, and disable commands that would otherwise cause fork() operations: -# BGSAVE and BGREWRITEAOF. This flag may not be combined with any of the other -# flags that configure AOF and RDB operations. +# persistence is unnecessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will redirect heap +# allocations to the system heap allocator, and disable commands that would +# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF. +# This flag may not be combined with any of the other flags that configure +# AOF and RDB operations. # persistence-available [(yes)|no] # Don't use more memory than the specified amount of bytes. @@ -500,18 +451,33 @@ heapdir C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382 # limit for maxmemory so that there is some free RAM on the system for slave # output buffers (but this is not needed if the policy is 'noeviction'). # +# WARNING: not setting maxmemory will cause Redis to terminate with an +# out-of-memory exception if the heap limit is reached. +# +# NOTE: since Redis uses the system paging file to allocate the heap memory, +# the Working Set memory usage showed by the Windows Task Manager or by other +# tools such as ProcessExplorer will not always be accurate. For example, right +# after a background save of the RDB or the AOF files, the working set value +# may drop significantly. In order to check the correct amount of memory used +# by the redis-server to store the data, use the INFO client command. 
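That memory check can also be scripted from the client. A minimal sketch assuming the `Info` dictionary exposed by ServiceStack.Redis's `RedisClient`; the keys are standard Redis INFO fields.

```csharp
using System;
using ServiceStack.Redis;

class MemoryCheck
{
    static void Main()
    {
        // The redis-6382 instance configured in this diff; adjust host/port as needed
        using var redis = new RedisClient("127.0.0.1", 6382);

        var info = redis.Info; // INFO output parsed into a key/value dictionary
        Console.WriteLine($"used_memory:      {info["used_memory"]}");
        Console.WriteLine($"used_memory_peak: {info["used_memory_peak"]}");
    }
}
```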
The INFO +# command shows only the memory used to store the redis data, not the extra +# memory used by the Windows process for its own requirements. Th3 extra amount +# of memory not reported by the INFO command can be calculated subtracting the +# Peak Working Set reported by the Windows Task Manager and the used_memory_peak +# reported by the INFO command. +# # maxmemory # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory # is reached. You can select among five behaviors: -# +# # volatile-lru -> remove the key with an expire set using an LRU algorithm # allkeys-lru -> remove any key according to the LRU algorithm # volatile-random -> remove a random key with an expire set # allkeys-random -> remove a random key, any key # volatile-ttl -> remove the key with the nearest expire time (minor TTL) # noeviction -> don't expire at all, just return an error on write operations -# +# # Note: with any of the above policies, Redis will return an error on write # operations, when there are no suitable keys for eviction. # @@ -600,7 +566,7 @@ appendfsync everysec # the same as "appendfsync none". In practical terms, this means that it is # possible to lose up to 30 seconds of log in the worst scenario (with the # default Linux settings). -# +# # If you have latency problems turn this to "yes". Otherwise leave it as # "no" that is the safest pick from the point of view of durability. no-appendfsync-on-rewrite no @@ -608,7 +574,7 @@ no-appendfsync-on-rewrite no # Automatic rewrite of the append only file. # Redis is able to automatically rewrite the log file implicitly calling # BGREWRITEAOF when the AOF log size grows by the specified percentage. -# +# # This is how it works: Redis remembers the size of the AOF file after the # latest rewrite (if no rewrite has happened since the restart, the size of # the AOF at startup is used). @@ -667,6 +633,114 @@ aof-load-truncated yes # Set it to 0 or a negative value for unlimited execution without warnings. lua-time-limit 5000 +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. 
+# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. 
+# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + ################################## SLOW LOG ################################### # The Redis Slow Log is a system to log queries that exceeded a specified @@ -675,7 +749,7 @@ lua-time-limit 5000 # but just the time needed to actually execute the command (this is the only # stage of command execution where the thread is blocked and can not serve # other requests in the meantime). -# +# # You can configure the slow log with two parameters: one tells Redis # what is the execution time, in microseconds, to exceed in order for the # command to get logged, and the other parameter is the length of the @@ -806,7 +880,7 @@ hll-sparse-max-bytes 3000 # that is rehashing, the more rehashing "steps" are performed, so if the # server is idle the rehashing is never complete and some more memory is used # by the hash table. -# +# # The default is to use this millisecond 10 times every second in order to # actively rehash the main dictionaries, freeing memory when possible. # diff --git a/src/sentinel/redis-6382/sentinel.conf b/src/sentinel/redis-6382/sentinel.conf index 7e0a5003..d52c0824 100644 --- a/src/sentinel/redis-6382/sentinel.conf +++ b/src/sentinel/redis-6382/sentinel.conf @@ -1,180 +1,7 @@ -# Redis1 sentinel.conf +# Relative to ./src/sentinel -# port -# The port that this sentinel instance will run on port 26382 - -# sentinel announce-ip -# sentinel announce-port -# -# The above two configuration directives are useful in environments where, -# because of NAT, Sentinel is reachable from outside via a non-local address. -# -# When announce-ip is provided, the Sentinel will claim the specified IP address -# in HELLO messages used to gossip its presence, instead of auto-detecting the -# local address as it usually does. -# -# Similarly when announce-port is provided and is valid and non-zero, Sentinel -# will announce the specified TCP port. -# -# The two options don't need to be used together, if only announce-ip is -# provided, the Sentinel will announce the specified IP and the server port -# as specified by the "port" option. If only announce-port is provided, the -# Sentinel will announce the auto-detected local IP and the specified port. -# -# Example: -# -# sentinel announce-ip 1.2.3.4 - -# dir -# Every long running process should have a well-defined working directory. -# For Redis Sentinel to chdir to /tmp at startup is the simplest thing -# for the process to don't interfere with administrative tasks such as -# unmounting filesystems. -dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382" - -# sentinel monitor -# -# Tells Sentinel to monitor this master, and to consider it in O_DOWN -# (Objectively Down) state only if at least sentinels agree. -# -# Note that whatever is the ODOWN quorum, a Sentinel will require to -# be elected by the majority of the known Sentinels in order to -# start a failover, so no failover can be performed in minority. -# -# Slaves are auto-discovered, so you don't need to specify slaves in -# any way. Sentinel itself will rewrite this configuration file adding -# the slaves using additional configuration options. 
-# Also note that the configuration file is rewritten when a -# slave is promoted to master. -# -# Note: master name should not include special characters or spaces. -# The valid charset is A-z 0-9 and the three characters ".-_". +dir ./redis-6382/state sentinel monitor mymaster 127.0.0.1 6380 2 +protected-mode no -# sentinel auth-pass -# -# Set the password to use to authenticate with the master and slaves. -# Useful if there is a password set in the Redis instances to monitor. -# -# Note that the master password is also used for slaves, so it is not -# possible to set a different password in masters and slaves instances -# if you want to be able to monitor these instances with Sentinel. -# -# However you can have Redis instances without the authentication enabled -# mixed with Redis instances requiring the authentication (as long as the -# password set is the same for all the instances requiring the password) as -# the AUTH command will have no effect in Redis instances with authentication -# switched off. -# -# Example: -# -# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd - -# sentinel down-after-milliseconds -# -# Number of milliseconds the master (or any attached slave or sentinel) should -# be unreachable (as in, not acceptable reply to PING, continuously, for the -# specified period) in order to consider it in S_DOWN state (Subjectively -# Down). -# -# Default is 30 seconds. -sentinel config-epoch mymaster 4 - -# sentinel parallel-syncs -# -# How many slaves we can reconfigure to point to the new slave simultaneously -# during the failover. Use a low number if you use the slaves to serve query -# to avoid that all the slaves will be unreachable at about the same -# time while performing the synchronization with the master. -sentinel leader-epoch mymaster 4 - -# sentinel failover-timeout -# -# Specifies the failover timeout in milliseconds. It is used in many ways: -# -# - The time needed to re-start a failover after a previous failover was -# already tried against the same master by a given Sentinel, is two -# times the failover timeout. -# -# - The time needed for a slave replicating to a wrong master according -# to a Sentinel current configuration, to be forced to replicate -# with the right master, is exactly the failover timeout (counting since -# the moment a Sentinel detected the misconfiguration). -# -# - The time needed to cancel a failover that is already in progress but -# did not produced any configuration change (SLAVEOF NO ONE yet not -# acknowledged by the promoted slave). -# -# - The maximum time a failover in progress waits for all the slaves to be -# reconfigured as slaves of the new master. However even after this time -# the slaves will be reconfigured by the Sentinels anyway, but not with -# the exact parallel-syncs progression as specified. -# -# Default is 3 minutes. -sentinel known-slave mymaster 127.0.0.1 6381 -sentinel known-slave mymaster 127.0.0.1 6382 - -# SCRIPTS EXECUTION -# -# sentinel notification-script and sentinel reconfig-script are used in order -# to configure scripts that are called to notify the system administrator -# or to reconfigure clients after a failover. The scripts are executed -# with the following rules for error handling: -# -# If script exits with "1" the execution is retried later (up to a maximum -# number of times currently set to 10). -# -# If script exits with "2" (or an higher value) the script execution is -# not retried. 
-# -# If script terminates because it receives a signal the behavior is the same -# as exit code 1. -# -# A script has a maximum running time of 60 seconds. After this limit is -# reached the script is terminated with a SIGKILL and the execution retried. - -# NOTIFICATION SCRIPT -# -# sentinel notification-script -# -# Call the specified notification script for any sentinel event that is -# generated in the WARNING level (for instance -sdown, -odown, and so forth). -# This script should notify the system administrator via email, SMS, or any -# other messaging system, that there is something wrong with the monitored -# Redis systems. -# -# The script is called with just two arguments: the first is the event type -# and the second the event description. -# -# The script must exist and be executable in order for sentinel to start if -# this option is provided. -# -# Example: -# -# sentinel notification-script mymaster /var/redis/notify.sh - -# CLIENTS RECONFIGURATION SCRIPT -# -# sentinel client-reconfig-script -# -# When the master changed because of a failover a script can be called in -# order to perform application-specific tasks to notify the clients that the -# configuration has changed and the master is at a different address. -# -# The following arguments are passed to the script: -# -# -# -# is currently always "failover" -# is either "leader" or "observer" -# -# The arguments from-ip, from-port, to-ip, to-port are used to communicate -# the old address of the master and the new address of the elected slave -# (now a master). -# -# This script should be resistant to multiple invocations. -# -# Example: -# -# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis-6382/sentinel.windows.conf b/src/sentinel/redis-6382/sentinel.windows.conf new file mode 100644 index 00000000..1aca1b25 --- /dev/null +++ b/src/sentinel/redis-6382/sentinel.windows.conf @@ -0,0 +1,181 @@ +# Redis1 sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26382 +bind 127.0.0.1 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. +# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir "C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-6382" + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. 
+# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6380 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. +# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. +# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel config-epoch mymaster 4 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel leader-epoch mymaster 4 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel known-slave mymaster 127.0.0.1 6381 +sentinel known-slave mymaster 127.0.0.1 6382 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. 
The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. +# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. +# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. +# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh diff --git a/src/sentinel/redis.conf b/src/sentinel/redis.conf new file mode 100644 index 00000000..c446925f --- /dev/null +++ b/src/sentinel/redis.conf @@ -0,0 +1,940 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################ GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. 
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile /var/run/redis.pid + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. 
+# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. 
+# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. 
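The master/slave layout these configs describe (master on 6380, read-only slaves on 6381/6382) maps onto the client's read/write split. A minimal sketch using PooledRedisClientManager's read-write/read-only host lists; note that replication is asynchronous, so a freshly written key may not be visible on a slave immediately.

```csharp
using System;
using ServiceStack.Redis;

class ReadWriteSplit
{
    static void Main()
    {
        var readWriteHosts = new[] { "127.0.0.1:6380" };                    // master
        var readOnlyHosts  = new[] { "127.0.0.1:6381", "127.0.0.1:6382" };  // read-only slaves

        using var manager = new PooledRedisClientManager(readWriteHosts, readOnlyHosts);

        using (var master = manager.GetClient())           // writes always go to the master
            master.SetValue("replicated:key", "value");

        using (var replica = manager.GetReadOnlyClient())  // reads may be served by a slave
            Console.WriteLine(replica.GetValue("replicated:key"));
    }
}
```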
+repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. 
+# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... 
if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs a bit more CPU. 3 is very fast but not very accurate. +# +# maxmemory-samples 5 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. 
It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. 
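As an aside, the AOF directives above can also be adjusted on a running server from the client rather than by editing this file. A minimal sketch, assuming the client's Custom(...) raw-command pass-through for commands without a typed wrapper; host and values are illustrative:

    using System;
    using ServiceStack.Redis;

    class AofConfigSketch
    {
        static void Main()
        {
            using (var redis = new RedisClient("localhost", 6379))
            {
                // CONFIG SET changes a directive on the running server without editing the
                // .conf file; the change is lost on restart unless CONFIG REWRITE is issued.
                var reply = redis.Custom("CONFIG", "SET", "appendfsync", "everysec");
                Console.WriteLine(reply.Text);   // "OK" on success

                // BGREWRITEAOF asks the server to compact the append-only file in the background.
                redis.Custom("BGREWRITEAOF");
            }
        }
    }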
+# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. 
Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). 
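A client-side aside for the slow log: the sketch below goes through the Custom(...) raw-command API and its RedisText result rather than assuming a typed SLOWLOG wrapper; the host and entry count are illustrative:

    using System;
    using ServiceStack.Redis;

    class SlowLogSketch
    {
        static void Main()
        {
            using (var redis = new RedisClient("localhost", 6379))
            {
                // Commands slower than slowlog-log-slower-than (in microseconds) are recorded;
                // SLOWLOG GET returns the most recent entries as a nested reply.
                var entries = redis.Custom("SLOWLOG", "GET", "10");
                Console.WriteLine(entries.Children.Count + " slow entries recorded");

                // SLOWLOG RESET reclaims the memory used by the log.
                Console.WriteLine(redis.Custom("SLOWLOG", "RESET").Text);   // "OK"
            }
        }
    }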
+# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. 
Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). 
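The pub/sub limits below exist because subscribers receive pushed data, and the keyspace-notification section above is one common source of such pushes. A small subscriber sketch using the client's IRedisSubscription API; the channel name, expiry, and runtime CONFIG SET (via the assumed Custom(...) raw-command API) are illustrative:

    using System;
    using ServiceStack.Redis;

    class KeyspaceEventSubscriberSketch
    {
        static void Main()
        {
            var manager = new RedisManagerPool("localhost:6379");

            using (var redis = manager.GetClient())
            {
                // Equivalent of notify-keyspace-events Ex: publish an event whenever a key expires
                // (the exact timing of expired events depends on the server).
                redis.Custom("CONFIG", "SET", "notify-keyspace-events", "Ex");
                redis.SetValue("temp:key", "value", TimeSpan.FromSeconds(1));
            }

            using (var redis = manager.GetClient())
            using (var subscription = redis.CreateSubscription())
            {
                subscription.OnMessage = (channel, msg) =>
                {
                    Console.WriteLine(channel + " -> " + msg);   // __keyevent@0__:expired -> temp:key
                    subscription.UnSubscribeFromAllChannels();   // stop blocking after the first event
                };

                // Blocks this connection; a subscriber that never drains its socket is exactly what
                // the pubsub client-output-buffer-limit below protects the server against.
                subscription.SubscribeToChannels("__keyevent@0__:expired");
            }
        }
    }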
+# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +protected-mode no diff --git a/src/sentinel/redis/redis-cli.exe b/src/sentinel/redis/redis-cli.exe new file mode 100644 index 00000000..03e8b172 Binary files /dev/null and b/src/sentinel/redis/redis-cli.exe differ diff --git a/src/sentinel/redis/redis-server.exe b/src/sentinel/redis/redis-server.exe new file mode 100644 index 00000000..e8cd2d3c Binary files /dev/null and b/src/sentinel/redis/redis-server.exe differ diff --git a/src/sentinel/sentinel.conf b/src/sentinel/sentinel.conf new file mode 100644 index 00000000..776e6091 --- /dev/null +++ b/src/sentinel/sentinel.conf @@ -0,0 +1,182 @@ +# Example sentinel.conf + +# port +# The port that this sentinel instance will run on +port 26379 + +# sentinel announce-ip +# sentinel announce-port +# +# The above two configuration directives are useful in environments where, +# because of NAT, Sentinel is reachable from outside via a non-local address. 
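For the client side of this Sentinel setup, the RedisSentinel class used elsewhere in this changeset resolves the current master through the sentinels. A minimal connection sketch; the sentinel ports (26380-26382) mirror the local startAll scripts and the master name matches the sentinel monitor mymaster line in this file:

    using System;
    using ServiceStack.Redis;

    class SentinelClientSketch
    {
        static void Main()
        {
            // Sentinel endpoints mirror the local setup started by src/sentinel/startAll.sh;
            // adjust hosts/ports for a real deployment.
            var sentinel = new RedisSentinel(
                new[] { "127.0.0.1:26380", "127.0.0.1:26381", "127.0.0.1:26382" },
                masterName: "mymaster");

            // Start() resolves the current master/replicas via the sentinels and returns a
            // client manager that is kept up to date as failovers happen.
            var redisManager = sentinel.Start();

            using (var client = redisManager.GetClient())
            {
                client.SetValue("sentinel:test", DateTime.UtcNow.ToString("o"));
                Console.WriteLine(client.GetValue("sentinel:test"));
            }
        }
    }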
+# +# When announce-ip is provided, the Sentinel will claim the specified IP address +# in HELLO messages used to gossip its presence, instead of auto-detecting the +# local address as it usually does. +# +# Similarly when announce-port is provided and is valid and non-zero, Sentinel +# will announce the specified TCP port. +# +# The two options don't need to be used together, if only announce-ip is +# provided, the Sentinel will announce the specified IP and the server port +# as specified by the "port" option. If only announce-port is provided, the +# Sentinel will announce the auto-detected local IP and the specified port. +# +# Example: +# +# sentinel announce-ip 1.2.3.4 + +# dir +# Every long running process should have a well-defined working directory. +# For Redis Sentinel to chdir to /tmp at startup is the simplest thing +# for the process to don't interfere with administrative tasks such as +# unmounting filesystems. +dir /tmp + +# sentinel monitor +# +# Tells Sentinel to monitor this master, and to consider it in O_DOWN +# (Objectively Down) state only if at least sentinels agree. +# +# Note that whatever is the ODOWN quorum, a Sentinel will require to +# be elected by the majority of the known Sentinels in order to +# start a failover, so no failover can be performed in minority. +# +# Slaves are auto-discovered, so you don't need to specify slaves in +# any way. Sentinel itself will rewrite this configuration file adding +# the slaves using additional configuration options. +# Also note that the configuration file is rewritten when a +# slave is promoted to master. +# +# Note: master name should not include special characters or spaces. +# The valid charset is A-z 0-9 and the three characters ".-_". +sentinel monitor mymaster 127.0.0.1 6379 2 + +# sentinel auth-pass +# +# Set the password to use to authenticate with the master and slaves. +# Useful if there is a password set in the Redis instances to monitor. +# +# Note that the master password is also used for slaves, so it is not +# possible to set a different password in masters and slaves instances +# if you want to be able to monitor these instances with Sentinel. +# +# However you can have Redis instances without the authentication enabled +# mixed with Redis instances requiring the authentication (as long as the +# password set is the same for all the instances requiring the password) as +# the AUTH command will have no effect in Redis instances with authentication +# switched off. +# +# Example: +# +# sentinel auth-pass mymaster MySUPER--secret-0123passw0rd + +# sentinel down-after-milliseconds +# +# Number of milliseconds the master (or any attached slave or sentinel) should +# be unreachable (as in, not acceptable reply to PING, continuously, for the +# specified period) in order to consider it in S_DOWN state (Subjectively +# Down). +# +# Default is 30 seconds. +sentinel down-after-milliseconds mymaster 30000 + +# sentinel parallel-syncs +# +# How many slaves we can reconfigure to point to the new slave simultaneously +# during the failover. Use a low number if you use the slaves to serve query +# to avoid that all the slaves will be unreachable at about the same +# time while performing the synchronization with the master. +sentinel parallel-syncs mymaster 1 + +# sentinel failover-timeout +# +# Specifies the failover timeout in milliseconds. 
It is used in many ways: +# +# - The time needed to re-start a failover after a previous failover was +# already tried against the same master by a given Sentinel, is two +# times the failover timeout. +# +# - The time needed for a slave replicating to a wrong master according +# to a Sentinel current configuration, to be forced to replicate +# with the right master, is exactly the failover timeout (counting since +# the moment a Sentinel detected the misconfiguration). +# +# - The time needed to cancel a failover that is already in progress but +# did not produced any configuration change (SLAVEOF NO ONE yet not +# acknowledged by the promoted slave). +# +# - The maximum time a failover in progress waits for all the slaves to be +# reconfigured as slaves of the new master. However even after this time +# the slaves will be reconfigured by the Sentinels anyway, but not with +# the exact parallel-syncs progression as specified. +# +# Default is 3 minutes. +sentinel failover-timeout mymaster 180000 + +# SCRIPTS EXECUTION +# +# sentinel notification-script and sentinel reconfig-script are used in order +# to configure scripts that are called to notify the system administrator +# or to reconfigure clients after a failover. The scripts are executed +# with the following rules for error handling: +# +# If script exits with "1" the execution is retried later (up to a maximum +# number of times currently set to 10). +# +# If script exits with "2" (or an higher value) the script execution is +# not retried. +# +# If script terminates because it receives a signal the behavior is the same +# as exit code 1. +# +# A script has a maximum running time of 60 seconds. After this limit is +# reached the script is terminated with a SIGKILL and the execution retried. + +# NOTIFICATION SCRIPT +# +# sentinel notification-script +# +# Call the specified notification script for any sentinel event that is +# generated in the WARNING level (for instance -sdown, -odown, and so forth). +# This script should notify the system administrator via email, SMS, or any +# other messaging system, that there is something wrong with the monitored +# Redis systems. +# +# The script is called with just two arguments: the first is the event type +# and the second the event description. +# +# The script must exist and be executable in order for sentinel to start if +# this option is provided. +# +# Example: +# +# sentinel notification-script mymaster /var/redis/notify.sh + +# CLIENTS RECONFIGURATION SCRIPT +# +# sentinel client-reconfig-script +# +# When the master changed because of a failover a script can be called in +# order to perform application-specific tasks to notify the clients that the +# configuration has changed and the master is at a different address. +# +# The following arguments are passed to the script: +# +# +# +# is currently always "failover" +# is either "leader" or "observer" +# +# The arguments from-ip, from-port, to-ip, to-port are used to communicate +# the old address of the master and the new address of the elected slave +# (now a master). +# +# This script should be resistant to multiple invocations. 
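The notification and reconfiguration scripts described below run on the Sentinel host; on the client side, RedisSentinel exposes callbacks for the same events. OnWorkerError appears in this changeset's console tests, while OnFailover is assumed from the client's public API, so treat this as a sketch rather than definitive wiring:

    using System;
    using ServiceStack.Redis;

    class SentinelCallbacksSketch
    {
        static void Main()
        {
            var sentinel = new RedisSentinel(
                new[] { "127.0.0.1:26380", "127.0.0.1:26381", "127.0.0.1:26382" },
                masterName: "mymaster")
            {
                // Fired after the sentinels promote a new master: roughly the client-side
                // counterpart of a client-reconfig-script (assumed public API).
                OnFailover = manager => Console.WriteLine("Failover detected, pools refreshed"),

                // Fired when the background sentinel worker hits an error (also used by the
                // console tests in this changeset).
                OnWorkerError = ex => Console.WriteLine("Sentinel worker error: " + ex.Message),
            };

            var redisManager = sentinel.Start();

            using (var client = redisManager.GetClient())
            {
                Console.WriteLine("Connected to master on port " + client.Port);
            }
        }
    }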
+# +# Example: +# +# sentinel client-reconfig-script mymaster /var/redis/reconfig.sh + +protected-mode no + diff --git a/src/sentinel/startAll.cmd b/src/sentinel/startAll.cmd index d7ae9d8d..3a4db540 100644 --- a/src/sentinel/startAll.cmd +++ b/src/sentinel/startAll.cmd @@ -2,9 +2,9 @@ start cmd.exe /e:on /k "redis\redis-server redis-6380\redis.windows.conf" start cmd.exe /e:on /k "redis\redis-server redis-6381\redis.windows.conf" start cmd.exe /e:on /k "redis\redis-server redis-6382\redis.windows.conf" -start cmd.exe /e:on /k "redis\redis-server redis-6380\sentinel.conf --sentinel" -start cmd.exe /e:on /k "redis\redis-server redis-6381\sentinel.conf --sentinel" -start cmd.exe /e:on /k "redis\redis-server redis-6382\sentinel.conf --sentinel" +start cmd.exe /e:on /k "redis\redis-server redis-6380\sentinel.windows.conf --sentinel" +start cmd.exe /e:on /k "redis\redis-server redis-6381\sentinel.windows.conf --sentinel" +start cmd.exe /e:on /k "redis\redis-server redis-6382\sentinel.windows.conf --sentinel" pause diff --git a/src/sentinel/startAll.sh b/src/sentinel/startAll.sh new file mode 100755 index 00000000..7019d631 --- /dev/null +++ b/src/sentinel/startAll.sh @@ -0,0 +1,17 @@ +redis_server=redis-server +redis_sentinel=redis-sentinel +redis_cli=redis-cli + +$redis_server redis-6380/redis.conf & +$redis_sentinel redis-6380/sentinel.conf & + +$redis_server redis-6381/redis.conf & +$redis_sentinel redis-6381/sentinel.conf & + +$redis_server redis-6382/redis.conf & +$redis_sentinel redis-6382/sentinel.conf & + +read -n1 -r -p "Press any key to see sentinel info on masters and slaves..." + +$redis_cli -p 26380 sentinel master mymaster +$redis_cli -p 26381 sentinel slaves mymaster diff --git a/src/sentinel/stopAll.sh b/src/sentinel/stopAll.sh new file mode 100755 index 00000000..c6a712de --- /dev/null +++ b/src/sentinel/stopAll.sh @@ -0,0 +1,9 @@ +redis_cli=redis-cli + +$redis_cli -p 26382 SHUTDOWN NOSAVE +$redis_cli -p 26381 SHUTDOWN NOSAVE +$redis_cli -p 26380 SHUTDOWN NOSAVE + +$redis_cli -p 6382 SHUTDOWN NOSAVE +$redis_cli -p 6381 SHUTDOWN NOSAVE +$redis_cli -p 6380 SHUTDOWN NOSAVE diff --git a/src/servicestack.snk b/src/servicestack.snk new file mode 100644 index 00000000..dade7cea Binary files /dev/null and b/src/servicestack.snk differ diff --git a/tests/Console.Tests/App.config b/tests/Console.Tests/App.config deleted file mode 100644 index 2c98e0db..00000000 --- a/tests/Console.Tests/App.config +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/tests/Console.Tests/BlockingPop.cs b/tests/Console.Tests/BlockingPop.cs index ec02d358..90aef4c5 100644 --- a/tests/Console.Tests/BlockingPop.cs +++ b/tests/Console.Tests/BlockingPop.cs @@ -21,12 +21,11 @@ public void Execute() //RedisConfig.DefaultRetryTimeout = 15 * 1000; RedisConfig.DefaultIdleTimeOutSecs = 240; RedisConfig.BackOffMultiplier = 10; - RedisConfig.BufferLength = 1450; RedisConfig.BufferPoolMaxSize = 500000; RedisConfig.VerifyMasterConnections = true; RedisConfig.HostLookupTimeoutMs = 1000; RedisConfig.DeactivatedClientsExpiry = TimeSpan.FromSeconds(15); - RedisConfig.DisableVerboseLogging = false; + RedisConfig.EnableVerboseLogging = true; var redisManager = new RedisManagerPool("localhost?connectTimeout=1000"); @@ -50,7 +49,7 @@ public void Execute() // add items to list for (int i = 1; i <= items; i++) { - redis.PushItemToList(listId, "item {0}".Fmt(i)); + redis.PushItemToList(listId, $"item {i}"); } do @@ -58,7 +57,7 @@ public void Execute() var item = redis.BlockingPopItemFromList(listId, null); // log the popped item. 
if BRPOP timeout is null and list empty, I do not expect to print anything - log.InfoFormat("{0}", item.IsNullOrEmpty() ? " list empty " : item); + log.InfoFormat("{0}", string.IsNullOrEmpty(item) ? " list empty " : item); System.Threading.Thread.Sleep(1000); diff --git a/tests/Console.Tests/BlockingRemoveAfterReconnection.cs b/tests/Console.Tests/BlockingRemoveAfterReconnection.cs new file mode 100644 index 00000000..b085d9ed --- /dev/null +++ b/tests/Console.Tests/BlockingRemoveAfterReconnection.cs @@ -0,0 +1,35 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Logging; +using ServiceStack.Redis; +using ServiceStack.Text; + +namespace ConsoleTests +{ + public class BlockingRemoveAfterReconnection + { + protected internal static RedisManagerPool BasicRedisClientManager; + + public void Execute() + { + //RedisConfig.AssumeServerVersion = 4000; + RedisConfig.DefaultConnectTimeout = 20 * 1000; + RedisConfig.DefaultRetryTimeout = 20 * 1000; + BasicRedisClientManager = new RedisManagerPool(); + try + { + using (var client = BasicRedisClientManager.GetClient()) + { + Console.WriteLine("Blocking..."); + var fromList = client.BlockingRemoveStartFromList("AnyQueue", TimeSpan.FromMinutes(20)); + Console.WriteLine($"Received: {fromList.Dump()}"); + } + } + catch (Exception e) + { + Console.WriteLine(e); + } + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/BrPopAfterReconnection.cs b/tests/Console.Tests/BrPopAfterReconnection.cs new file mode 100644 index 00000000..f5867f06 --- /dev/null +++ b/tests/Console.Tests/BrPopAfterReconnection.cs @@ -0,0 +1,66 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Logging; +using ServiceStack.Redis; +using ServiceStack.Text; + +namespace ConsoleTests +{ + public class BrPopAfterReconnection + { + protected internal static BasicRedisClientManager BasicRedisClientManager; + + public void Execute() + { +// RedisConfig.AssumeServerVersion = 4000; +// RedisConfig.DisableVerboseLogging = false; +// LogManager.LogFactory = new ConsoleLogFactory(); + + var host = "localhost"; + var port = "6379"; + var db = "9"; + + var redisUri = $"{host}:{port}?db={db}"; + + BasicRedisClientManager = new BasicRedisClientManager(redisUri); + var queue = "FormSaved"; + + while (true) + { + Task.Run(() => BlockingReceive(queue)); + Thread.Sleep(1000); + + Console.WriteLine("Restart Redis and press Enter..."); + Console.ReadLine(); + + Console.WriteLine("Enter something:"); + var item = Console.ReadLine(); + + if (!string.IsNullOrWhiteSpace(item)) + { + using (var client = BasicRedisClientManager.GetClient()) + { + client.AddItemToList(queue, item); + } + + Console.WriteLine("Item added"); + } + + Thread.Sleep(1000); + } + } + + public static void BlockingReceive(string queue) + { + using (var client = BasicRedisClientManager.GetReadOnlyClient()) + { + Console.WriteLine($"Listening to {queue}"); + + var fromList = client.BlockingPopItemFromList(queue, TimeSpan.FromSeconds(60)); + + Console.WriteLine($"Received:{fromList.Dump()}"); + } + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/Console.Tests.csproj b/tests/Console.Tests/Console.Tests.csproj index 73a719b1..5b260ab0 100644 --- a/tests/Console.Tests/Console.Tests.csproj +++ b/tests/Console.Tests/Console.Tests.csproj @@ -1,96 +1,17 @@ - - - - - Debug - AnyCPU - {8368C965-B4F6-4263-9ABB-731A175B2E77} - Exe - Properties - ConsoleTests - Console.Tests - v4.5 - 512 - - - - AnyCPU - true - full - false - 
bin\Debug\ - DEBUG;TRACE - prompt - 4 - false - - - AnyCPU - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - false - - - - ..\..\lib\tests\ServiceStack.dll - - - ..\..\lib\tests\ServiceStack.Client.dll - - - False - ..\..\lib\ServiceStack.Common.dll - - - ..\..\lib\ServiceStack.Interfaces.dll - - - ..\..\lib\tests\ServiceStack.Server.dll - - - ..\..\lib\ServiceStack.Text.dll - - - - - - - - - - - - - - - - - - - - - - - - - - {AF99F19B-4C04-4F58-81EF-B092F1FCC540} - ServiceStack.Redis - - - - - - - - \ No newline at end of file + + + + Exe + net6.0 + Console.Tests + + + + + + + + + + + diff --git a/tests/Console.Tests/DbSelectConnectionStringIssue.cs b/tests/Console.Tests/DbSelectConnectionStringIssue.cs new file mode 100644 index 00000000..49d453f5 --- /dev/null +++ b/tests/Console.Tests/DbSelectConnectionStringIssue.cs @@ -0,0 +1,39 @@ +using System; +using System.Threading; +using ServiceStack; +using ServiceStack.Logging; +using ServiceStack.Redis; + +namespace ConsoleTests; + +class DbSelectConnectionStringIssue +{ + public void Execute() + { + LogManager.LogFactory = new ConsoleLogFactory(); + + Licensing.RegisterLicense(""); + + var redisManagerPool = new RedisManagerPool("redis://redisHost?db=7"); + + for (int i = 0; i < 5; i++) + { + try + { + using (IRedisClient client = redisManagerPool.GetClient()) + { + string value = client.GetValue("status"); + + Console.WriteLine($"Successfully retrieved value => '{value}'"); + } + } + catch (Exception ex) + { + Console.WriteLine($"Exception handled \n{ex}"); + } + + Console.WriteLine("Sleeping for 25 seconds to allow client to be garbage collected"); + Thread.Sleep(TimeSpan.FromSeconds(25)); + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/ForceFailover.cs b/tests/Console.Tests/ForceFailover.cs index efca4e1b..7ae40136 100644 --- a/tests/Console.Tests/ForceFailover.cs +++ b/tests/Console.Tests/ForceFailover.cs @@ -9,7 +9,7 @@ public class ForceFailover { public void Execute() { - RedisConfig.DisableVerboseLogging = true; + RedisConfig.EnableVerboseLogging = false; LogManager.LogFactory = new ConsoleLogFactory(debugEnabled:true); var sentinel = new RedisSentinel(new [] { diff --git a/tests/Console.Tests/HashCollectionStressTests.cs b/tests/Console.Tests/HashCollectionStressTests.cs index 52cc6dd8..e0a37b2a 100644 --- a/tests/Console.Tests/HashCollectionStressTests.cs +++ b/tests/Console.Tests/HashCollectionStressTests.cs @@ -272,7 +272,7 @@ private TOut RetryAction(Func action) return result; } } - catch (Exception ex) + catch (Exception) { if (i++ < 3) diff --git a/tests/Console.Tests/HashStressTest.cs b/tests/Console.Tests/HashStressTest.cs index baf3234c..5259a84d 100644 --- a/tests/Console.Tests/HashStressTest.cs +++ b/tests/Console.Tests/HashStressTest.cs @@ -27,6 +27,11 @@ public override bool Equals(object obj) if (obj.GetType() != this.GetType()) return false; return Equals((DeviceInfo) obj); } + + public override int GetHashCode() + { + return base.GetHashCode(); + } } public class HashStressTest diff --git a/tests/Console.Tests/LocalRedisSentinelFailoverTests.cs b/tests/Console.Tests/LocalRedisSentinelFailoverTests.cs index 56095e03..d6caa531 100644 --- a/tests/Console.Tests/LocalRedisSentinelFailoverTests.cs +++ b/tests/Console.Tests/LocalRedisSentinelFailoverTests.cs @@ -120,7 +120,7 @@ protected override void OnSetUp() StartRedisServersAndSentinels(); } - protected virtual void OnTearDown() + protected override void OnTearDown() { log.Debug("Press Enter to shutdown Redis Sentinels and Servers..."); 
Console.ReadLine(); diff --git a/tests/Console.Tests/MasterFailoverWithPassword.cs b/tests/Console.Tests/MasterFailoverWithPassword.cs index f9e0257b..a77b7093 100644 --- a/tests/Console.Tests/MasterFailoverWithPassword.cs +++ b/tests/Console.Tests/MasterFailoverWithPassword.cs @@ -9,23 +9,25 @@ public class MasterFailoverWithPassword { public void Execute() { + string AddPassword(string host) => $"password@{host}"; + var sentinelHosts = new[] { "127.0.0.1:26380", "127.0.0.1:26381", "127.0.0.1:26382" }; - var sentinel = new RedisSentinel(sentinelHosts, masterName: "mymaster"); - sentinel.HostFilter = host => "password@{0}".Fmt(host); + var sentinel = new RedisSentinel(sentinelHosts.Map(AddPassword), masterName: "mymaster") { + HostFilter = AddPassword, + SentinelHostFilter = AddPassword, + }; var manager = sentinel.Start(); - sentinel.OnWorkerError = exception => Console.WriteLine(exception); + sentinel.OnWorkerError = Console.WriteLine; while (true) { try { const string RedisKey = "my Name"; - using (var client = manager.GetClient()) - { - var result = client.Get(RedisKey); - Console.WriteLine("Redis Key: {0} \t Port: {1}", result, client.Port); - } + using var client = manager.GetClient(); + var result = client.Get(RedisKey); + Console.WriteLine("Redis Key: {0} \t Port: {1}", result, client.Port); } catch (Exception ex) { diff --git a/tests/Console.Tests/MultiBlockingRemoveAfterReconnection.cs b/tests/Console.Tests/MultiBlockingRemoveAfterReconnection.cs new file mode 100644 index 00000000..3abdf40a --- /dev/null +++ b/tests/Console.Tests/MultiBlockingRemoveAfterReconnection.cs @@ -0,0 +1,72 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using ServiceStack.Logging; +using ServiceStack.Redis; +using ServiceStack.Text; + +namespace ConsoleTests +{ + public class MultiBlockingRemoveAfterReconnection + { + protected internal static RedisManagerPool RedisManager; + + public void Execute() + { +// LogManager.LogFactory = new ConsoleLogFactory(); +// RedisConfig.EnableVerboseLogging = true; + + RedisConfig.DefaultConnectTimeout = 20 * 1000; + RedisConfig.DefaultRetryTimeout = 20 * 1000; + + RedisManager = new RedisManagerPool($"localhost:6379?db=9"); + + MultipleBlocking(3); + + Console.ReadLine(); + } + + private static void MultipleBlocking(int count) + { + for (int i = 0; i < count; i++) + { + var queue = $"Q{i + 1}"; + RunTask(() => BlockingRemoveStartFromList(queue), $"Receive from {queue}"); + } + } + public static void BlockingRemoveStartFromList(string queue) + { + using (var client = RedisManager.GetClient() as RedisClient) + { + client.Ping(); + Console.WriteLine($"#{client.ClientId} Listening to {queue}"); + + var fromList = client.BlockingRemoveStartFromList(queue, TimeSpan.FromHours(10)); + Console.WriteLine($"#{client.ClientId} Received: '{fromList.Dump()}' from '{queue}'"); + } + } + + private static void RunTask(Action action, string name) + { + Task.Run(() => + { + + while (true) + { + try + { + Console.WriteLine($"Invoking {name}"); + action.Invoke(); + } + catch (Exception exception) + { + Console.WriteLine($"Exception in {name}: {exception}"); + //Thread.Sleep(5000);// Give redis some time to wake up! 
+ } + + Thread.Sleep(100); + } + }); + } + } +} \ No newline at end of file diff --git a/tests/Console.Tests/Program.cs b/tests/Console.Tests/Program.cs index 54068dba..2db1835b 100644 --- a/tests/Console.Tests/Program.cs +++ b/tests/Console.Tests/Program.cs @@ -36,7 +36,13 @@ static void Main(string[] args) //new BlockingPop().Execute(); - new MasterFailoverWithPassword().Execute(); + //new MasterFailoverWithPassword().Execute(); + + //new BlockingRemoveAfterReconnection().Execute(); + + //new MultiBlockingRemoveAfterReconnection().Execute(); + + new DbSelectConnectionStringIssue().Execute(); } } } diff --git a/tests/Console.Tests/Properties/AssemblyInfo.cs b/tests/Console.Tests/Properties/AssemblyInfo.cs deleted file mode 100644 index d188967b..00000000 --- a/tests/Console.Tests/Properties/AssemblyInfo.cs +++ /dev/null @@ -1,36 +0,0 @@ -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; - -// General Information about an assembly is controlled through the following -// set of attributes. Change these attribute values to modify the information -// associated with an assembly. -[assembly: AssemblyTitle("Console.Tests")] -[assembly: AssemblyDescription("")] -[assembly: AssemblyConfiguration("")] -[assembly: AssemblyCompany("")] -[assembly: AssemblyProduct("Console.Tests")] -[assembly: AssemblyCopyright("Copyright © 2014")] -[assembly: AssemblyTrademark("")] -[assembly: AssemblyCulture("")] - -// Setting ComVisible to false makes the types in this assembly not visible -// to COM components. If you need to access a type in this assembly from -// COM, set the ComVisible attribute to true on that type. -[assembly: ComVisible(false)] - -// The following GUID is for the ID of the typelib if this project is exposed to COM -[assembly: Guid("33cdad3a-8f89-437f-9a64-261c162888aa")] - -// Version information for an assembly consists of the following four values: -// -// Major Version -// Minor Version -// Build Number -// Revision -// -// You can specify all the values or you can default the Build and Revision Numbers -// by using the '*' as shown below: -// [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("1.0.0.0")] -[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/tests/Console.Tests/RedisSentinelFailoverTests.cs b/tests/Console.Tests/RedisSentinelFailoverTests.cs index c8c9fd00..b15bfa88 100644 --- a/tests/Console.Tests/RedisSentinelFailoverTests.cs +++ b/tests/Console.Tests/RedisSentinelFailoverTests.cs @@ -23,7 +23,7 @@ public abstract class RedisSentinelFailoverTests public void Execute() { - RedisConfig.DisableVerboseLogging = true; + RedisConfig.EnableVerboseLogging = false; LogManager.LogFactory = new ConsoleLogFactory(debugEnabled: true); log = LogManager.GetLogger(GetType()); @@ -35,7 +35,7 @@ public void Execute() { if (UseRedisManagerPool) { - sentinel.RedisManagerFactory = (masters, slaves) => + sentinel.RedisManagerFactory = (masters, replicas) => new RedisManagerPool(masters); } @@ -68,7 +68,7 @@ public void Execute() log.DebugFormat("{0} = {1}", key, value); } } - catch (ObjectDisposedException ex) + catch (ObjectDisposedException) { log.DebugFormat("ObjectDisposedException detected, disposing timer..."); clientTimer.Dispose(); diff --git a/tests/Directory.Build.props b/tests/Directory.Build.props new file mode 100644 index 00000000..7eb7717e --- /dev/null +++ b/tests/Directory.Build.props @@ -0,0 +1,29 @@ + + + + 6.0.3 + latest + false + + + + DEBUG + + + + $(DefineConstants);NETFX;NET472 + + + + 
$(DefineConstants);NETCORE;NETSTANDARD2_0 + + + + $(DefineConstants);NET6_0;NET6_0_OR_GREATER + + + + $(DefineConstants);NETCORE;NETCORE_SUPPORT + + + diff --git a/tests/ServiceStack.Redis.Benchmark/IncrBenchmarks.cs b/tests/ServiceStack.Redis.Benchmark/IncrBenchmarks.cs new file mode 100644 index 00000000..b39aa825 --- /dev/null +++ b/tests/ServiceStack.Redis.Benchmark/IncrBenchmarks.cs @@ -0,0 +1,281 @@ +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Configs; +using BenchmarkDotNet.Jobs; +using BenchmarkDotNet.Order; +using Pipelines.Sockets.Unofficial; +using Respite; +using StackExchange.Redis; +using System.Linq; +using System.Net.Sockets; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Benchmark +{ + [SimpleJob(RuntimeMoniker.Net472)] + [SimpleJob(RuntimeMoniker.NetCoreApp31)] + [MemoryDiagnoser] + [GroupBenchmarksBy(BenchmarkLogicalGroupRule.ByCategory)] + [Orderer(SummaryOrderPolicy.Method, MethodOrderPolicy.Alphabetical)] + [CategoriesColumn] + public class IncrBenchmarks + { + ConnectionMultiplexer _seredis; + IServer _seredis_server; + IDatabase _seredis_db; + RedisClient _ssredis; + IRedisClientAsync _ssAsync; + RespConnection _respite; + + static IncrBenchmarks() + { + RedisClient.NewFactoryFn = () => new RedisClient("127.0.0.1", 6379); + } + + [GlobalSetup] + public Task Setup() => Setup(false); + internal async Task Setup(bool minimal) + { + _ssredis = RedisClient.New(); + _ssAsync = _ssredis; + + if (!minimal) + { + _seredis = await ConnectionMultiplexer.ConnectAsync("127.0.0.1:6379"); + _seredis_server = _seredis.GetServer(_seredis.GetEndPoints().Single()); + _seredis_db = _seredis.GetDatabase(); + + var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); + SocketConnection.SetRecommendedClientOptions(socket); + socket.Connect("127.0.0.1", 6379); + + _respite = RespConnection.Create(socket); + } + } + + [GlobalCleanup] + public async Task Teardown() + { + _seredis?.Dispose(); + _ssredis?.Dispose(); + if (_respite != null) await _respite.DisposeAsync(); + + _seredis_server = null; + _seredis_db = null; + _seredis = null; + _ssredis = null; + _respite = null; + _ssAsync = null; + } + + const string Key = "my_key"; +#if DEBUG + const int PER_TEST = 10; +#else + const int PER_TEST = 1000; +#endif + + [BenchmarkCategory("IncrAsync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public async Task SERedisIncrAsync() + { + long last = default; + await _seredis_db.KeyDeleteAsync(Key); + for (int i = 0; i < PER_TEST; i++) + { + last = await _seredis_db.StringIncrementAsync(Key); + } + return last; + } + + [BenchmarkCategory("IncrSync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public long SERedisIncrSync() + { + long last = default; + _seredis_db.KeyDelete(Key); + for (int i = 0; i < PER_TEST; i++) + { + last = _seredis_db.StringIncrement(Key); + } + return last; + } + + [BenchmarkCategory("PipelineIncrAsync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public async Task SERedisPipelineIncrAsync() + { + var last = Task.FromResult(0L); + await _seredis_db.KeyDeleteAsync(Key); + var batch = _seredis_db.CreateBatch(); + for (int i = 0; i < PER_TEST; i++) + { + last = batch.StringIncrementAsync(Key); + } + batch.Execute(); + return await last; + } + + [BenchmarkCategory("TransactionIncrAsync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public async Task SERedisTransactionIncrAsync() + { + var last = 
Task.FromResult(0L); + await _seredis_db.KeyDeleteAsync(Key); + var batch = _seredis_db.CreateTransaction(); + for (int i = 0; i < PER_TEST; i++) + { + last = batch.StringIncrementAsync(Key); + } + await batch.ExecuteAsync(); + return await last; + } + + [BenchmarkCategory("TransactionIncrSync")] + [Benchmark(Description = "SERedis", OperationsPerInvoke = PER_TEST)] + public async Task SERedisTransactionIncrSync() + { + var last = Task.FromResult(0L); + _seredis_db.KeyDelete(Key); + var batch = _seredis_db.CreateTransaction(); + for (int i = 0; i < PER_TEST; i++) + { + last = batch.StringIncrementAsync(Key); + } + batch.Execute(); + return await last; + } + + [BenchmarkCategory("IncrAsync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public async Task SSRedisIncrAsync() + { + long last = default; + _ssredis.Del(Key); // todo: asyncify + for (int i = 0; i < PER_TEST; i++) + { + last = await _ssAsync.IncrementValueAsync(Key); + } + return last; + } + + + [BenchmarkCategory("IncrSync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public long SSRedisIncrSync() + { + long last = default; + _ssredis.Del(Key); + for (int i = 0; i < PER_TEST; i++) + { + last = _ssredis.IncrementValue(Key); + } + return last; + } + + [BenchmarkCategory("PipelineIncrSync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public long SSRedisPipelineIncrSync() + { + long last = default; + _ssredis.Del(Key); + using var trans = _ssredis.CreatePipeline(); + for (int i = 0; i < PER_TEST; i++) + { + trans.QueueCommand(r => r.IncrementValue(Key), l => last = l); + } + trans.Flush(); + return last; + } + + [BenchmarkCategory("TransactionIncrSync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public long SSRedisTransactionIncrSync() + { + long last = default; + _ssredis.Del(Key); + using var trans = _ssredis.CreateTransaction(); + for (int i = 0; i < PER_TEST; i++) + { + trans.QueueCommand(r => r.IncrementValue(Key), l => last = l); + } + trans.Commit(); + return last; + } + + [BenchmarkCategory("PipelineIncrAsync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public async Task SSRedisPipelineIncrAsync() + { + long last = default; + _ssredis.Del(Key); // todo: asyncify + await using var trans = _ssAsync.CreatePipeline(); + for (int i = 0; i < PER_TEST; i++) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), l => last = l); + } + await trans.FlushAsync(); + return last; + } + + [BenchmarkCategory("TransactionIncrAsync")] + [Benchmark(Description = "SSRedis", OperationsPerInvoke = PER_TEST)] + public async Task SSRedisTransactionIncrAsync() + { + long last = default; + _ssredis.Del(Key); // todo: asyncify + await using var trans = await _ssAsync.CreateTransactionAsync(); + for (int i = 0; i < PER_TEST; i++) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), l => last = l); + } + await trans.CommitAsync(); + return last; + } + + + //static readonly RespValue s_Time = RespValue.CreateAggregate( + // RespType.Array, RespValue.Create(RespType.BlobString, "time")); + + //static DateTime ParseTime(in RespValue value) + //{ + // var parts = value.SubItems; + // if (parts.TryGetSingleSpan(out var span)) + // return Parse(span[0], span[1]); + // return Slow(parts); + // static DateTime Slow(in ReadOnlyBlock parts) + // { + // var iter = parts.GetEnumerator(); + // if (!iter.MoveNext()) Throw(); + // var seconds = iter.Current; + // if (!iter.MoveNext()) Throw(); + // var 
microseconds = iter.Current; + // return Parse(seconds, microseconds); + // static void Throw() => throw new InvalidOperationException(); + // } + + // static DateTime Parse(in RespValue seconds, in RespValue microseconds) + // => Epoch.AddSeconds(seconds.ToInt64()).AddMilliseconds(microseconds.ToInt64() / 1000.0); + //} + //static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc); + + //[BenchmarkCategory("IncrSync")] + //[Benchmark(Description = "Respite", OperationsPerInvoke = PER_TEST)] + //public void RespiteTimeSync() + //{ + // for (int i = 0; i < PER_TEST; i++) + // { + // _respite.Call(s_Time, val => ParseTime(val)); + // } + //} + + //[BenchmarkCategory("IncrAsync")] + //[Benchmark(Description = "Respite", OperationsPerInvoke = PER_TEST)] + //public async Task RespiteTimeAsync() + //{ + // for (int i = 0; i < PER_TEST; i++) + // { + // await _respite.CallAsync(s_Time, val => ParseTime(val)); + // } + //} + } +} diff --git a/tests/ServiceStack.Redis.Benchmark/Program.cs b/tests/ServiceStack.Redis.Benchmark/Program.cs new file mode 100644 index 00000000..84c48100 --- /dev/null +++ b/tests/ServiceStack.Redis.Benchmark/Program.cs @@ -0,0 +1,39 @@ +using BenchmarkDotNet.Running; +using System.Threading.Tasks; +using System; +namespace ServiceStack.Redis.Benchmark +{ + class Program + { +#if DEBUG + static async Task Main() + { + var obj = new IncrBenchmarks(); + try + { + await obj.Setup(false); + + Console.WriteLine(obj.SERedisIncrSync()); + Console.WriteLine(await obj.SERedisIncrAsync()); + Console.WriteLine(await obj.SERedisPipelineIncrAsync()); + Console.WriteLine(await obj.SERedisTransactionIncrAsync()); + Console.WriteLine(await obj.SERedisTransactionIncrSync()); + + Console.WriteLine(obj.SSRedisIncrSync()); + Console.WriteLine(obj.SSRedisPipelineIncrSync()); + Console.WriteLine(obj.SSRedisTransactionIncrSync()); + Console.WriteLine(await obj.SSRedisIncrAsync()); + Console.WriteLine(await obj.SSRedisPipelineIncrAsync()); + Console.WriteLine(await obj.SSRedisTransactionIncrAsync()); + } + finally + { + await obj.Teardown(); + } + } +#else + static void Main(string[] args) + => BenchmarkSwitcher.FromAssembly(typeof(Program).Assembly).Run(args); +#endif + } +} diff --git a/tests/ServiceStack.Redis.Benchmark/ServiceStack.Redis.Benchmark.csproj b/tests/ServiceStack.Redis.Benchmark/ServiceStack.Redis.Benchmark.csproj new file mode 100644 index 00000000..a5bcef26 --- /dev/null +++ b/tests/ServiceStack.Redis.Benchmark/ServiceStack.Redis.Benchmark.csproj @@ -0,0 +1,18 @@ + + + + Exe + net6.0;net472 + 8 + + + + + + + + + + + + diff --git a/tests/ServiceStack.Redis.Tests.NetCore/ServiceStack.Redis.Tests/project.json b/tests/ServiceStack.Redis.Tests.NetCore/ServiceStack.Redis.Tests/project.json deleted file mode 100644 index f673dfd6..00000000 --- a/tests/ServiceStack.Redis.Tests.NetCore/ServiceStack.Redis.Tests/project.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "configurations": { - "Debug": { - "buildOptions": { - "define": ["NETCORE_SUPPORT", "NETCORE", "DEBUG", "TRACE"] - } - }, - "Release": { - "buildOptions": { - "define": ["NETCORE_SUPPORT", "NETCORE", "TRACE"], - "optimize": true - } - } - }, - "buildOptions": { - "debugType": "portable", - "emitEntryPoint": true - }, - "compile": ["../../ServiceStack.Redis.Tests/**/*.cs"], - "dependencies": { - "Microsoft.NETCore.App": { - "type": "platform", - "version": "1.0.0" - }, - "NUnitLite" : "3.2.1", - "ServiceStack.Redis" : "4.0.*", - "ServiceStack.Interfaces" : "4.0.*", - "ServiceStack.Text" : "4.0.*", - 
"ServiceStack.Common" : "4.0.*" - }, - "frameworks": { - "netcoreapp1.0": { - "imports": "dnxcore50", - "dependencies" : { - "System.Runtime.Serialization.Primitives": "4.1.1" - } - } - } -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests.NetCore/global.json b/tests/ServiceStack.Redis.Tests.NetCore/global.json deleted file mode 100644 index 15c0cc85..00000000 --- a/tests/ServiceStack.Redis.Tests.NetCore/global.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "projects": [ - "../../src/ServiceStack.Redis.NetCore", - "ServiceStack.Common.Tests" - ] -} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests.NetCore/ServiceStack.Redis.Tests/Program.cs b/tests/ServiceStack.Redis.Tests.Sentinel/NetCoreTestsRunner.cs similarity index 78% rename from tests/ServiceStack.Redis.Tests.NetCore/ServiceStack.Redis.Tests/Program.cs rename to tests/ServiceStack.Redis.Tests.Sentinel/NetCoreTestsRunner.cs index 38fd5d1f..82e5386b 100644 --- a/tests/ServiceStack.Redis.Tests.NetCore/ServiceStack.Redis.Tests/Program.cs +++ b/tests/ServiceStack.Redis.Tests.Sentinel/NetCoreTestsRunner.cs @@ -1,3 +1,4 @@ +#if NUNITLITE using NUnitLite; using NUnit.Common; using System.Reflection; @@ -7,9 +8,9 @@ using System.Globalization; using System.Threading; -namespace NUnitLite.Tests +namespace ServiceStack.Redis.Tests.Sentinel { - public class Program + public class NetCoreTestsRunner { /// /// The main program executes the tests. Output may be routed to @@ -26,11 +27,12 @@ public static int Main(string[] args) Licensing.RegisterLicense(licenseKey); //"ActivatedLicenseFeatures: ".Print(LicenseUtils.ActivatedLicenseFeatures()); - CultureInfo.DefaultThreadCurrentCulture = new CultureInfo("en-US"); + CultureInfo.DefaultThreadCurrentCulture = new CultureInfo("en-US"); JsConfig.InitStatics(); //JsonServiceClient client = new JsonServiceClient(); var writer = new ExtendedTextWrapper(Console.Out); - return new AutoRun(((IReflectableType)typeof(Program)).GetTypeInfo().Assembly).Execute(args, writer, Console.In); + return new AutoRun(((IReflectableType)typeof(NetCoreTestsRunner)).GetTypeInfo().Assembly).Execute(args, writer, Console.In); } } -} \ No newline at end of file +} +#endif \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/Redis3SentinelSetupTests.cs b/tests/ServiceStack.Redis.Tests.Sentinel/Redis3SentinelSetupTests.cs index 78d72367..6fdee8fb 100644 --- a/tests/ServiceStack.Redis.Tests.Sentinel/Redis3SentinelSetupTests.cs +++ b/tests/ServiceStack.Redis.Tests.Sentinel/Redis3SentinelSetupTests.cs @@ -4,6 +4,7 @@ namespace ServiceStack.Redis.Tests.Sentinel { [TestFixture, Category("Integration")] + [Ignore("Requires cloud setup")] public class Redis3SentinelSetupTests : RedisSentinelTestBase { @@ -57,9 +58,9 @@ public void Can_connect_to_GoogleCloud_3SentinelSetup() client.FlushAll(); - client.SetEntry("Sentinel3Setup", "GoogleCloud"); + client.SetValue("Sentinel3Setup", "GoogleCloud"); - var result = client.GetEntry("Sentinel3Setup"); + var result = client.GetValue("Sentinel3Setup"); Assert.That(result, Is.EqualTo("GoogleCloud")); } @@ -67,7 +68,7 @@ public void Can_connect_to_GoogleCloud_3SentinelSetup() { "{0}:{1}".Print(readOnly.Host, readOnly.Port); - var result = readOnly.GetEntry("Sentinel3Setup"); + var result = readOnly.GetValue("Sentinel3Setup"); Assert.That(result, Is.EqualTo("GoogleCloud")); } } diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/RedisResolverTests.cs b/tests/ServiceStack.Redis.Tests.Sentinel/RedisResolverTests.cs index 
1186c1fd..ee8dddc3 100644 --- a/tests/ServiceStack.Redis.Tests.Sentinel/RedisResolverTests.cs +++ b/tests/ServiceStack.Redis.Tests.Sentinel/RedisResolverTests.cs @@ -9,14 +9,14 @@ namespace ServiceStack.Redis.Tests.Sentinel public class RedisResolverTests : RedisSentinelTestBase { - [TestFixtureSetUp] - public void TestFixtureSetUp() + [OneTimeSetUp] + public void OneTimeSetUp() { StartAllRedisServers(); } - [TestFixtureTearDown] - public void TestFixtureTearDown() + [OneTimeTearDown] + public void OneTimeTearDown() { ShutdownAllRedisServers(); } @@ -24,10 +24,10 @@ public void TestFixtureTearDown() [Test] public void RedisResolver_does_reset_when_detects_invalid_master() { - var invalidMaster = new[] { SlaveHosts[0] }; - var invalidSlaves = new[] { MasterHosts[0], SlaveHosts[1] }; + var invalidMaster = new[] { ReplicaHosts[0] }; + var invalidReplicas = new[] { MasterHosts[0], ReplicaHosts[1] }; - using (var redisManager = new PooledRedisClientManager(invalidMaster, invalidSlaves)) + using (var redisManager = new PooledRedisClientManager(invalidMaster, invalidReplicas)) { var resolver = (RedisResolver)redisManager.RedisResolver; @@ -44,7 +44,7 @@ public void RedisResolver_does_reset_when_detects_invalid_master() "Masters:".Print(); resolver.Masters.PrintDump(); - "Slaves:".Print(); + "Replicas:".Print(); resolver.Slaves.PrintDump(); } } @@ -52,71 +52,61 @@ public void RedisResolver_does_reset_when_detects_invalid_master() [Test] public void PooledRedisClientManager_alternates_hosts() { - using (var redisManager = new PooledRedisClientManager(MasterHosts, SlaveHosts)) + using var redisManager = new PooledRedisClientManager(MasterHosts, ReplicaHosts); + using (var master = redisManager.GetClient()) { - using (var master = redisManager.GetClient()) - { - Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - master.SetValue("KEY", "1"); - } - using (var master = redisManager.GetClient()) - { - Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - master.Increment("KEY", 1); - } + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.SetValue("KEY", "1"); + } + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.Increment("KEY", 1); + } - 5.Times(i => - { - using (var readOnly = redisManager.GetReadOnlyClient()) - { - Assert.That(readOnly.GetHostString(), Is.EqualTo(SlaveHosts[i % SlaveHosts.Length])); - Assert.That(readOnly.GetValue("KEY"), Is.EqualTo("2")); - } - }); + 5.Times(i => { + using var readOnly = redisManager.GetReadOnlyClient(); + Assert.That(readOnly.GetHostString(), Is.EqualTo(ReplicaHosts[i % ReplicaHosts.Length])); + Assert.That(readOnly.GetValue("KEY"), Is.EqualTo("2")); + }); - using (var cahce = redisManager.GetCacheClient()) - { - Assert.That(cahce.Get("KEY"), Is.EqualTo("2")); - } + using (var cache = redisManager.GetCacheClient()) + { + Assert.That(cache.Get("KEY"), Is.EqualTo("2")); } } [Test] public void RedisManagerPool_alternates_hosts() { - using (var redisManager = new RedisManagerPool(MasterHosts)) + using var redisManager = new RedisManagerPool(MasterHosts); + using (var master = redisManager.GetClient()) { - using (var master = redisManager.GetClient()) - { - Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - master.SetValue("KEY", "1"); - } - using (var master = redisManager.GetClient()) - { - Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - master.Increment("KEY", 1); - } + Assert.That(master.GetHostString(), 
Is.EqualTo(MasterHosts[0])); + master.SetValue("KEY", "1"); + } + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.Increment("KEY", 1); + } - 5.Times(i => - { - using (var readOnly = redisManager.GetReadOnlyClient()) - { - Assert.That(readOnly.GetHostString(), Is.EqualTo(MasterHosts[0])); - Assert.That(readOnly.GetValue("KEY"), Is.EqualTo("2")); - } - }); + 5.Times(i => { + using var readOnly = redisManager.GetReadOnlyClient(); + Assert.That(readOnly.GetHostString(), Is.EqualTo(MasterHosts[0])); + Assert.That(readOnly.GetValue("KEY"), Is.EqualTo("2")); + }); - using (var cahce = redisManager.GetCacheClient()) - { - Assert.That(cahce.Get("KEY"), Is.EqualTo("2")); - } + using (var cache = redisManager.GetCacheClient()) + { + Assert.That(cache.Get("KEY"), Is.EqualTo("2")); } } [Test] public void BasicRedisClientManager_alternates_hosts() { - using (var redisManager = new BasicRedisClientManager(MasterHosts, SlaveHosts)) + using (var redisManager = new BasicRedisClientManager(MasterHosts, ReplicaHosts)) { using (var master = redisManager.GetClient()) { @@ -129,18 +119,15 @@ public void BasicRedisClientManager_alternates_hosts() master.Increment("KEY", 1); } - 5.Times(i => - { - using (var readOnly = redisManager.GetReadOnlyClient()) - { - Assert.That(readOnly.GetHostString(), Is.EqualTo(SlaveHosts[i % SlaveHosts.Length])); - Assert.That(readOnly.GetValue("KEY"), Is.EqualTo("2")); - } + 5.Times(i => { + using var readOnly = redisManager.GetReadOnlyClient(); + Assert.That(readOnly.GetHostString(), Is.EqualTo(ReplicaHosts[i % ReplicaHosts.Length])); + Assert.That(readOnly.GetValue("KEY"), Is.EqualTo("2")); }); - using (var cahce = redisManager.GetCacheClient()) + using (var cache = redisManager.GetCacheClient()) { - Assert.That(cahce.Get("KEY"), Is.EqualTo("2")); + Assert.That(cache.Get("KEY"), Is.EqualTo("2")); } } } @@ -148,20 +135,20 @@ public void BasicRedisClientManager_alternates_hosts() public class FixedResolver : IRedisResolver { private readonly RedisEndpoint master; - private readonly RedisEndpoint slave; + private readonly RedisEndpoint replica; public int NewClientsInitialized = 0; - public FixedResolver(RedisEndpoint master, RedisEndpoint slave) + public FixedResolver(RedisEndpoint master, RedisEndpoint replica) { this.master = master; - this.slave = slave; + this.replica = replica; this.ClientFactory = RedisConfig.ClientFactory; } public Func ClientFactory { get; set; } - public int ReadWriteHostsCount { get { return 1; } } - public int ReadOnlyHostsCount { get { return 1; } } + public int ReadWriteHostsCount => 1; + public int ReadOnlyHostsCount => 1; public void ResetMasters(IEnumerable hosts) { } public void ResetSlaves(IEnumerable hosts) { } @@ -179,134 +166,127 @@ public RedisClient CreateMasterClient(int desiredIndex) public RedisClient CreateSlaveClient(int desiredIndex) { - return CreateRedisClient(slave, master: false); + return CreateRedisClient(replica, master: false); } } [Test] public void PooledRedisClientManager_can_execute_CustomResolver() { - var resolver = new FixedResolver(MasterHosts[0].ToRedisEndpoint(), SlaveHosts[0].ToRedisEndpoint()); - using (var redisManager = new PooledRedisClientManager("127.0.0.1:8888") + var resolver = new FixedResolver(MasterHosts[0].ToRedisEndpoint(), ReplicaHosts[0].ToRedisEndpoint()); + using var redisManager = new PooledRedisClientManager("127.0.0.1:8888") { RedisResolver = resolver - }) + }; + using (var master = redisManager.GetClient()) { - using (var 
master = redisManager.GetClient()) + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.SetValue("KEY", "1"); + } + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.Increment("KEY", 1); + } + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(1)); + + 5.Times(i => + { + using (var replica = redisManager.GetReadOnlyClient()) { - Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - master.SetValue("KEY", "1"); + Assert.That(replica.GetHostString(), Is.EqualTo(ReplicaHosts[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("2")); } + }); + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(2)); + + redisManager.FailoverTo("127.0.0.1:9999", "127.0.0.1:9999"); + + 5.Times(i => + { using (var master = redisManager.GetClient()) { Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - master.Increment("KEY", 1); + Assert.That(master.GetValue("KEY"), Is.EqualTo("2")); } - Assert.That(resolver.NewClientsInitialized, Is.EqualTo(1)); - - 5.Times(i => + using (var replica = redisManager.GetReadOnlyClient()) { - using (var slave = redisManager.GetReadOnlyClient()) - { - Assert.That(slave.GetHostString(), Is.EqualTo(SlaveHosts[0])); - Assert.That(slave.GetValue("KEY"), Is.EqualTo("2")); - } - }); - Assert.That(resolver.NewClientsInitialized, Is.EqualTo(2)); - - redisManager.FailoverTo("127.0.0.1:9999", "127.0.0.1:9999"); - - 5.Times(i => - { - using (var master = redisManager.GetClient()) - { - Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - Assert.That(master.GetValue("KEY"), Is.EqualTo("2")); - } - using (var slave = redisManager.GetReadOnlyClient()) - { - Assert.That(slave.GetHostString(), Is.EqualTo(SlaveHosts[0])); - Assert.That(slave.GetValue("KEY"), Is.EqualTo("2")); - } - }); - Assert.That(resolver.NewClientsInitialized, Is.EqualTo(4)); - } + Assert.That(replica.GetHostString(), Is.EqualTo(ReplicaHosts[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("2")); + } + }); + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(4)); } [Test] public void RedisManagerPool_can_execute_CustomResolver() { - var resolver = new FixedResolver(MasterHosts[0].ToRedisEndpoint(), SlaveHosts[0].ToRedisEndpoint()); - using (var redisManager = new RedisManagerPool("127.0.0.1:8888") + var resolver = new FixedResolver(MasterHosts[0].ToRedisEndpoint(), ReplicaHosts[0].ToRedisEndpoint()); + using var redisManager = new RedisManagerPool("127.0.0.1:8888") { RedisResolver = resolver - }) + }; + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.SetValue("KEY", "1"); + } + using (var master = redisManager.GetClient()) + { + Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); + master.Increment("KEY", 1); + } + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(1)); + + 5.Times(i => { + using var replica = redisManager.GetReadOnlyClient(); + Assert.That(replica.GetHostString(), Is.EqualTo(MasterHosts[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("2")); + }); + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(1)); + + redisManager.FailoverTo("127.0.0.1:9999", "127.0.0.1:9999"); + + 5.Times(i => { using (var master = redisManager.GetClient()) { Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - master.SetValue("KEY", "1"); + Assert.That(master.GetValue("KEY"), Is.EqualTo("2")); } - using (var master = redisManager.GetClient()) + using (var 
replica = redisManager.GetReadOnlyClient()) { - Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - master.Increment("KEY", 1); + Assert.That(replica.GetHostString(), Is.EqualTo(MasterHosts[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("2")); } - Assert.That(resolver.NewClientsInitialized, Is.EqualTo(1)); - - 5.Times(i => - { - using (var slave = redisManager.GetReadOnlyClient()) - { - Assert.That(slave.GetHostString(), Is.EqualTo(MasterHosts[0])); - Assert.That(slave.GetValue("KEY"), Is.EqualTo("2")); - } - }); - Assert.That(resolver.NewClientsInitialized, Is.EqualTo(1)); - - redisManager.FailoverTo("127.0.0.1:9999", "127.0.0.1:9999"); - - 5.Times(i => - { - using (var master = redisManager.GetClient()) - { - Assert.That(master.GetHostString(), Is.EqualTo(MasterHosts[0])); - Assert.That(master.GetValue("KEY"), Is.EqualTo("2")); - } - using (var slave = redisManager.GetReadOnlyClient()) - { - Assert.That(slave.GetHostString(), Is.EqualTo(MasterHosts[0])); - Assert.That(slave.GetValue("KEY"), Is.EqualTo("2")); - } - }); - Assert.That(resolver.NewClientsInitialized, Is.EqualTo(2)); - } + }); + Assert.That(resolver.NewClientsInitialized, Is.EqualTo(2)); } - private static void InitializeEmptyRedisManagers(IRedisClientsManager redisManager, string[] masters, string[] slaves) + private static void InitializeEmptyRedisManagers(IRedisClientsManager redisManager, string[] masters, string[] replicas) { var hasResolver = (IHasRedisResolver)redisManager; hasResolver.RedisResolver.ResetMasters(masters); - hasResolver.RedisResolver.ResetSlaves(slaves); + hasResolver.RedisResolver.ResetSlaves(replicas); using (var master = redisManager.GetClient()) { Assert.That(master.GetHostString(), Is.EqualTo(masters[0])); master.SetValue("KEY", "1"); } - using (var slave = redisManager.GetReadOnlyClient()) + using (var replica = redisManager.GetReadOnlyClient()) { - Assert.That(slave.GetHostString(), Is.EqualTo(slaves[0])); - Assert.That(slave.GetValue("KEY"), Is.EqualTo("1")); + Assert.That(replica.GetHostString(), Is.EqualTo(replicas[0])); + Assert.That(replica.GetValue("KEY"), Is.EqualTo("1")); } } [Test] - public void Can_initalize_ClientManagers_with_no_hosts() + public void Can_initialize_ClientManagers_with_no_hosts() { - InitializeEmptyRedisManagers(new PooledRedisClientManager(), MasterHosts, SlaveHosts); + InitializeEmptyRedisManagers(new PooledRedisClientManager(), MasterHosts, ReplicaHosts); InitializeEmptyRedisManagers(new RedisManagerPool(), MasterHosts, MasterHosts); - InitializeEmptyRedisManagers(new BasicRedisClientManager(), MasterHosts, SlaveHosts); + InitializeEmptyRedisManagers(new BasicRedisClientManager(), MasterHosts, ReplicaHosts); } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelConnectionTests.cs b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelConnectionTests.cs new file mode 100644 index 00000000..a90f92b4 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelConnectionTests.cs @@ -0,0 +1,49 @@ +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests.Sentinel +{ + public class RedisSentinelConnectionTests + { + [Test] + public void Can_connect_to_AWS_Redis_Sentinel_SentinelMaster() + { + RedisConfig.AssumeServerVersion = 4000; + + var client = new RedisClient("52.7.181.87", 26379); + + var info = client.SentinelMaster("mymaster"); + + info.PrintDump(); + } + + [Test] + public void Can_connect_to_AWS_Redis_Sentinel_Ping() + { + 
RedisConfig.AssumeServerVersion = 4000; + + var client = new RedisClient("52.7.181.87", 26379); + + Assert.That(client.Ping()); + } + + [Test] + public void Can_connect_to_RedisSentinel() + { + RedisConfig.AssumeServerVersion = 4000; + + var sentinel = new RedisSentinel("52.7.181.87:26379") { + IpAddressMap = { + {"127.0.0.1", "52.7.181.87"} + } + }; + + var redisManager = sentinel.Start(); + + using (var client = redisManager.GetClient()) + { + Assert.That(client.Ping()); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTestBase.cs b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTestBase.cs index e655f2a3..5a176a85 100644 --- a/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTestBase.cs +++ b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTestBase.cs @@ -3,12 +3,15 @@ using System.IO; using System.Threading; using ServiceStack.Text; +#if NETCORE +using System.Runtime.InteropServices; +#endif namespace ServiceStack.Redis.Tests.Sentinel { public abstract class RedisSentinelTestBase { - public const bool DisableLocalServers = true; + public static bool DisableLocalServers = false; public const string MasterName = "mymaster"; public const string GCloudMasterName = "master"; @@ -18,7 +21,7 @@ public abstract class RedisSentinelTestBase "127.0.0.1:6380", }; - public static string[] SlaveHosts = new[] + public static string[] ReplicaHosts = new[] { "127.0.0.1:6381", "127.0.0.1:6382", @@ -74,25 +77,58 @@ public static RedisSentinel CreateGCloudSentinel() public static void StartRedisServer(int port) { + var exePath = new FileInfo("~/../../src/sentinel/redis/redis-server.exe".MapProjectPath()).FullName; +#if NETCORE + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + exePath = "redis-server"; +#endif + var configDir = "~/../../src/sentinel/redis-{0}/".Fmt(port).MapProjectPath(); + var configPath = Path.Combine(configDir, "redis.conf"); + + File.WriteAllText(configPath, + File.ReadAllText(Path.Combine(configDir,"redis.windows.conf")).Replace( + @"C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-{0}".Fmt(port), + configDir.Replace(@"\", @"\\") + ) + ); + var pInfo = new ProcessStartInfo { - FileName = new FileInfo(@"..\..\..\..\src\sentinel\redis\redis-server.exe").FullName, - Arguments = new FileInfo(@"..\..\..\..\src\sentinel\redis-{0}\redis.windows.conf".Fmt(port)).FullName, + FileName = exePath, + Arguments = new FileInfo(configPath).FullName, RedirectStandardError = true, RedirectStandardOutput = true, UseShellExecute = false, CreateNoWindow = true, }; + var result = Process.Start(pInfo); + ThreadPool.QueueUserWorkItem(state => Process.Start(pInfo)); } public static void StartRedisSentinel(int port) { + var exePath = new FileInfo("~/../../src/sentinel/redis/redis-server.exe".MapProjectPath()).FullName; +#if NETCORE + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + exePath = "redis-server"; +#endif + var configDir = "~/../../src/sentinel/redis-{0}/".Fmt(port).MapProjectPath(); + var configPath = Path.Combine(configDir, "redis.sentinel.conf"); + + File.WriteAllText(configPath, + File.ReadAllText(Path.Combine(configDir,"sentinel.conf")).Replace( + @"C:\\src\\ServiceStack.Redis\\src\\sentinel\\redis-{0}".Fmt(port), + configDir.Replace(@"\", @"\\") + ) + ); + + var pInfo = new ProcessStartInfo { - FileName = new FileInfo(@"..\..\..\..\src\sentinel\redis\redis-server.exe").FullName, - Arguments = new FileInfo(@"..\..\..\..\src\sentinel\redis-{0}\sentinel.conf".Fmt(port)).FullName + " --sentinel", + 
FileName = exePath, + Arguments = new FileInfo(configPath).FullName + " --sentinel", RedirectStandardError = true, RedirectStandardOutput = true, UseShellExecute = false, diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTests.cs b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTests.cs index d6ed9d92..4302719d 100644 --- a/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTests.cs +++ b/tests/ServiceStack.Redis.Tests.Sentinel/RedisSentinelTests.cs @@ -3,7 +3,6 @@ using NUnit.Framework; using ServiceStack.Logging; using ServiceStack.Text; -using Timer = System.Timers.Timer; namespace ServiceStack.Redis.Tests.Sentinel { @@ -11,7 +10,7 @@ namespace ServiceStack.Redis.Tests.Sentinel public class RedisSentinelTests : RedisSentinelTestBase { - [TestFixtureSetUp] + [OneTimeSetUp] public void OnBeforeTestFixture() { StartAllRedisServers(); @@ -19,7 +18,7 @@ public void OnBeforeTestFixture() LogManager.LogFactory = new ConsoleLogFactory(debugEnabled:true); } - [TestFixtureTearDown] + [OneTimeTearDown] public void OnAfterTestFixture() { ShutdownAllRedisSentinels(); @@ -68,12 +67,12 @@ public void Can_Get_Sentinel_Master() } [Test] - public void Can_Get_Sentinel_Slaves() + public void Can_Get_Sentinel_Replicas() { - var slaves = RedisSentinel.SentinelSlaves(MasterName); - slaves.PrintDump(); + var replicas = RedisSentinel.SentinelSlaves(MasterName); + replicas.PrintDump(); - Assert.That(slaves.Count, Is.GreaterThan(0)); + Assert.That(replicas.Count, Is.GreaterThan(0)); } [Test] @@ -101,66 +100,51 @@ public void Can_Get_Master_Addr() [Test] public void Does_scan_for_other_active_sentinels() { - using (var sentinel = new RedisSentinel(SentinelHosts[0]) { + using var sentinel = new RedisSentinel(SentinelHosts[0]) { ScanForOtherSentinels = true - }) - { - var clientsManager = sentinel.Start(); + }; + var clientsManager = sentinel.Start(); - Assert.That(sentinel.SentinelHosts, Is.EquivalentTo(SentinelHosts)); + Assert.That(sentinel.SentinelHosts, Is.EquivalentTo(SentinelHosts)); - using (var client = clientsManager.GetClient()) - { - Assert.That(client.GetHostString(), Is.EqualTo(MasterHosts[0])); - } - } + using var client = clientsManager.GetClient(); + Assert.That(client.GetHostString(), Is.EqualTo(MasterHosts[0])); } [Test] public void Can_Get_Redis_ClientsManager() { - using (var sentinel = CreateSentinel()) - { - var clientsManager = sentinel.Start(); - using (var client = clientsManager.GetClient()) - { - Assert.That(client.GetHostString(), Is.EqualTo(MasterHosts[0])); - } - } + using var sentinel = CreateSentinel(); + var clientsManager = sentinel.Start(); + using var client = clientsManager.GetClient(); + Assert.That(client.GetHostString(), Is.EqualTo(MasterHosts[0])); } [Test] public void Can_specify_Timeout_on_RedisManager() { - using (var sentinel = CreateSentinel()) - { - sentinel.RedisManagerFactory = (masters, slaves) => new PooledRedisClientManager(masters, slaves) { IdleTimeOutSecs = 20 }; + using var sentinel = CreateSentinel(); + sentinel.RedisManagerFactory = (masters, replicas) => new PooledRedisClientManager(masters, replicas) { IdleTimeOutSecs = 20 }; - using (var clientsManager = (PooledRedisClientManager)sentinel.Start()) - using (var client = clientsManager.GetClient()) - { - Assert.That(clientsManager.IdleTimeOutSecs, Is.EqualTo(20)); - Assert.That(((RedisNativeClient)client).IdleTimeOutSecs, Is.EqualTo(20)); - } - } + using var clientsManager = (PooledRedisClientManager)sentinel.Start(); + using var client = clientsManager.GetClient(); + 
Assert.That(clientsManager.IdleTimeOutSecs, Is.EqualTo(20)); + Assert.That(((RedisNativeClient)client).IdleTimeOutSecs, Is.EqualTo(20)); } [Test] public void Can_specify_db_on_RedisSentinel() { - using (var sentinel = CreateSentinel()) - { - sentinel.HostFilter = host => "{0}?db=1".Fmt(host); - - using (var clientsManager = sentinel.Start()) - using (var client = clientsManager.GetClient()) - { - Assert.That(client.Db, Is.EqualTo(1)); - } - } + using var sentinel = CreateSentinel(); + sentinel.HostFilter = host => "{0}?db=1".Fmt(host); + + using var clientsManager = sentinel.Start(); + using var client = clientsManager.GetClient(); + Assert.That(client.Db, Is.EqualTo(1)); } - [Ignore, Test] + [Test] + [Ignore("Long running test")] public void Run_sentinel_for_10_minutes() { ILog log = LogManager.GetLogger(GetType()); @@ -173,12 +157,7 @@ public void Run_sentinel_for_10_minutes() using (var redisManager = sentinel.Start()) { - var aTimer = new Timer - { - Interval = 1000, - Enabled = true - }; - aTimer.Elapsed += (sender, args) => + var aTimer = new Timer((state) => { "Incrementing key".Print(); @@ -197,7 +176,7 @@ public void Run_sentinel_for_10_minutes() var value = redis.GetValue(key); log.InfoFormat("{0} = {1}", key, value); } - }; + }, null, 0, 1000); } } diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/ServiceStack.Redis.Tests.Sentinel.csproj b/tests/ServiceStack.Redis.Tests.Sentinel/ServiceStack.Redis.Tests.Sentinel.csproj index fa5e11cd..2d59d514 100644 --- a/tests/ServiceStack.Redis.Tests.Sentinel/ServiceStack.Redis.Tests.Sentinel.csproj +++ b/tests/ServiceStack.Redis.Tests.Sentinel/ServiceStack.Redis.Tests.Sentinel.csproj @@ -1,90 +1,51 @@ - - - + + - Debug - AnyCPU - {91C55091-A946-49B5-9517-8794EBCC5784} - Library - Properties - ServiceStack.Redis.Tests.Sentinel + net472;net6.0 + portable ServiceStack.Redis.Tests.Sentinel - v4.5 - 512 - ..\..\src\ - true - - - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - - - pdbonly - true - bin\Release\ - TRACE - prompt - 4 + Library + ServiceStack.Redis.Tests.Sentinel + false + false + false + false + false + false + false + false + - - ..\..\src\packages\NUnit.2.6.4\lib\nunit.framework.dll - True - - - ..\..\lib\ServiceStack.Client.dll - - - ..\..\lib\ServiceStack.Common.dll - - - ..\..\lib\ServiceStack.Interfaces.dll - - - ..\..\lib\ServiceStack.Text.dll - - - - - - - - + + - - - - - + + + + - - - - - - {af99f19b-4c04-4f58-81ef-b092f1fcc540} - ServiceStack.Redis - + + + $(DefineConstants);NET45 + + + + + + + + + + + + + + $(DefineConstants);NETCORE;NET6_0;NET6_0_OR_GREATER + + + - - - - - This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. 
- - - - - \ No newline at end of file + + diff --git a/tests/ServiceStack.Redis.Tests.Sentinel/packages.config b/tests/ServiceStack.Redis.Tests.Sentinel/packages.config deleted file mode 100644 index c714ef3a..00000000 --- a/tests/ServiceStack.Redis.Tests.Sentinel/packages.config +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/AdhocClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/AdhocClientTests.Async.cs new file mode 100644 index 00000000..0bbf678f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/AdhocClientTests.Async.cs @@ -0,0 +1,25 @@ +using NUnit.Framework; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class AdhocClientTestsAsync + { + [Test] + public async Task Search_Test() + { + await using var client = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + + const string cacheKey = "urn+metadata:All:SearchProProfiles?SwanShinichi Osawa /0/8,0,0,0"; + const long value = 1L; + await client.SetAsync(cacheKey, value); + var result = await client.GetAsync(cacheKey); + + Assert.That(result, Is.EqualTo(value)); + } + + // remaining tests from parent do not touch redis + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/AdhocClientTests.cs b/tests/ServiceStack.Redis.Tests/AdhocClientTests.cs index d1191e8b..254f1668 100644 --- a/tests/ServiceStack.Redis.Tests/AdhocClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/AdhocClientTests.cs @@ -63,7 +63,6 @@ public void Can_infer_utf8_bytes() [Test] public void Convert_int() { - var test = 1234; Debug.WriteLine(BitConverter.ToString(1234.ToString().ToUtf8Bytes())); } diff --git a/tests/ServiceStack.Redis.Tests/App.config b/tests/ServiceStack.Redis.Tests/App.config index 2c98e0db..c39009c1 100644 --- a/tests/ServiceStack.Redis.Tests/App.config +++ b/tests/ServiceStack.Redis.Tests/App.config @@ -1,6 +1,6 @@ - + diff --git a/tests/ServiceStack.Redis.Tests/AsyncImplementationsTests.Async.cs b/tests/ServiceStack.Redis.Tests/AsyncImplementationsTests.Async.cs new file mode 100644 index 00000000..3b835c38 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/AsyncImplementationsTests.Async.cs @@ -0,0 +1,882 @@ +// Copyright (c) Service Stack LLC. All Rights Reserved. 
+// License: https://raw.github.com/ServiceStack/ServiceStack/master/license.txt + +using NUnit.Framework; +using ServiceStack.Caching; +using ServiceStack.Data; +using ServiceStack.Model; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Pipeline; +using ServiceStack.Redis.Support.Locking; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Drawing.Text; +using System.Globalization; +using System.Linq; +using System.Linq.Expressions; +using System.Reflection; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + // verify that anything that implements IFoo also implements IFooAsync + [Category("Async")] + public class AsyncImplementationTests + { + private static readonly Type[] AllTypes + = typeof(RedisClient).Assembly.GetTypes() + .Concat(typeof(AsyncImplementationTests).Assembly.GetTypes()) + .Where(x => x.IsClass) + .OrderBy(x => x.FullName).ToArray(); + + private string Log(string message) + { + TestContext.Out.WriteLine(message); + return message; + } + + [TestCase(typeof(ICacheClient), typeof(ICacheClientAsync))] + [TestCase(typeof(IEntityStore), typeof(IEntityStoreAsync))] + [TestCase(typeof(IEntityStore<>), typeof(IEntityStoreAsync<>))] + [TestCase(typeof(IRedisClient), typeof(IRedisClientAsync))] + + [TestCase(typeof(IRedisClientsManager), typeof(IRedisClientsManagerAsync))] + [TestCase(typeof(IRedisNativeClient), typeof(IRedisNativeClientAsync))] + [TestCase(typeof(IRedisPipeline), typeof(IRedisPipelineAsync))] + [TestCase(typeof(IRedisPipelineShared), typeof(IRedisPipelineSharedAsync))] + [TestCase(typeof(IRedisQueueableOperation), typeof(IRedisQueueableOperationAsync))] + + [TestCase(typeof(IRedisQueueCompletableOperation), typeof(IRedisQueueCompletableOperationAsync))] + [TestCase(typeof(IRedisTransaction), typeof(IRedisTransactionAsync))] + [TestCase(typeof(IRedisTransactionBase), typeof(IRedisTransactionBaseAsync))] + [TestCase(typeof(IRedisTypedClient<>), typeof(IRedisTypedClientAsync<>))] + [TestCase(typeof(IRemoveByPattern), typeof(IRemoveByPatternAsync))] + + [TestCase(typeof(IDistributedLock), typeof(IDistributedLockAsync))] + [TestCase(typeof(IRedisSubscription), typeof(IRedisSubscriptionAsync))] + [TestCase(typeof(IRedisHash), typeof(IRedisHashAsync))] + [TestCase(typeof(IRedisSortedSet), typeof(IRedisSortedSetAsync))] + [TestCase(typeof(IRedisSet), typeof(IRedisSetAsync))] + + [TestCase(typeof(IRedisList), typeof(IRedisListAsync))] + [TestCase(typeof(IRedisHash<,>), typeof(IRedisHashAsync<,>))] + [TestCase(typeof(IRedisSortedSet<>), typeof(IRedisSortedSetAsync<>))] + [TestCase(typeof(IRedisSet<>), typeof(IRedisSetAsync<>))] + [TestCase(typeof(IRedisList<>), typeof(IRedisListAsync<>))] + + [TestCase(typeof(IRedisTypedPipeline<>), typeof(IRedisTypedPipelineAsync<>))] + [TestCase(typeof(IRedisTypedQueueableOperation<>), typeof(IRedisTypedQueueableOperationAsync<>))] + [TestCase(typeof(IRedisTypedTransaction<>), typeof(IRedisTypedTransactionAsync<>))] + + public void TestSameAPI(Type syncInterface, Type asyncInterface) + { + TestContext.Out.WriteLine($"Comparing '{GetCSharpTypeName(syncInterface)}' and '{GetCSharpTypeName(asyncInterface)}'..."); + + var actual = new List(); + foreach (var method in asyncInterface.GetMethods(BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly)) + { + var tok = new MethodToken(method); + actual.Add(GetSignature(tok)); + } + + var expected = new List(); + ParameterToken cancellationParameter 
= new ParameterToken("token", typeof(CancellationToken), ParameterAttributes.Optional); + foreach (var method in syncInterface.GetMethods(BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly)) + { + AddExpected(method); + } + if (asyncInterface == typeof(IRedisSortedSetAsync) + || asyncInterface == typeof(IRedisSetAsync) + || asyncInterface == typeof(IRedisListAsync)) + { + AddFrom(typeof(ICollection), nameof(ICollection.Clear)); + AddFrom(typeof(ICollection), nameof(ICollection.Add)); + AddFrom(typeof(ICollection), nameof(ICollection.Remove)); + AddFrom(typeof(ICollection), nameof(ICollection.Contains)); + AddFrom(typeof(ICollection), "get_" + nameof(ICollection.Count), true); + + if (asyncInterface == typeof(IRedisListAsync)) + { + AddFrom(typeof(IList), nameof(IList.IndexOf)); + AddFrom(typeof(IList), nameof(IList.RemoveAt)); + AddFrom(typeof(IList), "set_Item", true); + AddFrom(typeof(IList), "get_Item", true); + } + } + else if (asyncInterface == typeof(IRedisSortedSetAsync<>) + || asyncInterface == typeof(IRedisSetAsync<>) + || asyncInterface == typeof(IRedisListAsync<>)) + { + AddFrom(typeof(ICollection<>), nameof(ICollection.Clear)); + AddFrom(typeof(ICollection<>), nameof(ICollection.Add)); + AddFrom(typeof(ICollection<>), nameof(ICollection.Remove)); + AddFrom(typeof(ICollection<>), nameof(ICollection.Contains)); + AddFrom(typeof(ICollection<>), "get_" + nameof(ICollection.Count), true); + + if (asyncInterface == typeof(IRedisListAsync<>)) + { + AddFrom(typeof(IList<>), nameof(IList.IndexOf)); + AddFrom(typeof(IList<>), nameof(IList.RemoveAt)); + AddFrom(typeof(IList<>), "set_Item", true); + AddFrom(typeof(IList<>), "get_Item", true); + } + } + else if (asyncInterface == typeof(IRedisHashAsync<,>)) + { + AddFrom(typeof(ICollection<>).MakeGenericType(typeof(KeyValuePair<,>).MakeGenericType(asyncInterface.GetGenericArguments())), nameof(IDictionary.Add)); + AddFrom(typeof(IDictionary<,>), nameof(IDictionary.Add)); + AddFrom(typeof(ICollection<>), nameof(IDictionary.Clear)); + AddFrom(typeof(IDictionary<,>), nameof(IDictionary.ContainsKey)); + AddFrom(typeof(IDictionary<,>), nameof(IDictionary.Remove)); + AddFrom(typeof(ICollection<>), "get_" + nameof(IDictionary.Count), true); + } + else if (asyncInterface == typeof(IRedisHashAsync)) + { + AddFrom(typeof(ICollection>), nameof(IDictionary.Add)); + AddFrom(typeof(IDictionary), nameof(IDictionary.Add)); + AddFrom(typeof(ICollection), nameof(IDictionary.Clear)); + AddFrom(typeof(IDictionary), nameof(IDictionary.ContainsKey)); + AddFrom(typeof(IDictionary), nameof(IDictionary.Remove)); + AddFrom(typeof(ICollection), "get_" + nameof(IDictionary.Count), true); + } + else if (asyncInterface == typeof(IRedisNativeClientAsync)) + { + AddFrom(typeof(RedisClient), nameof(RedisClient.SlowlogReset)); + AddFrom(typeof(RedisClient), nameof(RedisClient.BitCount)); + AddFromTyped(typeof(RedisClient), nameof(RedisClient.ZCount), typeof(string), typeof(double), typeof(double)); + // can't expose as SlowlogItem because of interface locations + expected.Add("ValueTask SlowlogGetAsync(int? top = default, CancellationToken token = default)"); + // adding missing "exists" capability + expected.Add("ValueTask SetAsync(string key, byte[] value, bool exists, long expirySeconds = 0, long expiryMilliseconds = 0, CancellationToken token = default)"); + } + else if (asyncInterface == typeof(IRedisClientAsync)) + { + expected.Add("ValueTask GetSlowlogAsync(int? 
numberOfRecords = default, CancellationToken token = default)"); + expected.Add("ValueTask SlowlogResetAsync(CancellationToken token = default)"); + } + else if (asyncInterface == typeof(ICacheClientAsync)) + { + AddFrom(typeof(ICacheClientExtended), nameof(ICacheClientExtended.GetKeysByPattern)); + AddFrom(typeof(ICacheClientExtended), nameof(ICacheClientExtended.GetTimeToLive)); + AddFrom(typeof(ICacheClientExtended), nameof(ICacheClientExtended.RemoveExpiredEntries)); + } + + void AddFrom(Type syncInterface, string name, bool fromPropertyToMethod = false) + => AddExpected(syncInterface.GetMethod(name), fromPropertyToMethod); + void AddFromTyped(Type syncInterface, string name, params Type[] types) + => AddExpected(syncInterface.GetMethod(name, types), false); + + Type AsyncType(Type result) + { + bool useTask = asyncInterface == typeof(ICacheClientAsync) + || asyncInterface == typeof(IRemoveByPatternAsync) + || asyncInterface == typeof(IEntityStoreAsync) + || asyncInterface == typeof(IEntityStoreAsync<>); + + if (result is null || result == typeof(void)) + return useTask ? typeof(Task) : typeof(ValueTask); + + return (useTask ? typeof(Task<>) : typeof(ValueTask<>)).MakeGenericType(result); + } + void AddExpected(MethodInfo method, bool fromPropertyToMethod = false) + { + if (method is null) return; + var tok = new MethodToken(method); + + ParameterToken[] parameters = tok.GetParameters(); + + // think about the return type + Type returnType; + if (tok.ReturnType == typeof(void)) + { + returnType = AsyncType(tok.ReturnType); + } + else if (tok.ReturnType == typeof(IDisposable)) + { + returnType = typeof(IAsyncDisposable); + } + else if (tok.ReturnType.IsGenericType && tok.ReturnType.GetGenericTypeDefinition() == typeof(IEnumerable<>)) + { + returnType = typeof(IAsyncEnumerable<>).MakeGenericType(tok.ReturnType.GetGenericArguments()); + } + else + { + returnType = AsyncType(SwapForAsyncIfNeedeed(tok.ReturnType)); + } + string name = tok.Name + "Async"; + bool addCancellation = true; + // sniff to see if this is a delegate hook + if (parameters.Length == 0 && typeof(Delegate).IsAssignableFrom(tok.ReturnType) && name.StartsWith("get_")) + { + // property getter; replace with event add + returnType = typeof(void); + name = "add_" + name.Substring(4); + parameters = new[] { new ParameterToken("value", ActionDelegateToFunc(tok.ReturnType), default) }; + + } + else if (parameters.Length == 1 && tok.ReturnType == typeof(void) && name.StartsWith("set_") + && typeof(Delegate).IsAssignableFrom(parameters[0].ParameterType)) + { + // property setter; replace with event remove + returnType = typeof(void); + name = "remove_" + name.Substring(4); + ref ParameterToken p = ref parameters[0]; + p = p.WithParameterType(ActionDelegateToFunc(p.ParameterType)); + } + + if (name.StartsWith("get_") || name.StartsWith("set_") || name.StartsWith("add_") || name.StartsWith("remove_")) + { + bool preserve = (name.StartsWith("get_") || name.StartsWith("set_")), fullyHandled = false; + if (asyncInterface == typeof(IRedisNativeClientAsync) || asyncInterface == typeof(IRedisClientAsync) + || asyncInterface == typeof(IRedisTypedClientAsync<>)) + { + switch (tok.Name) + { + case "get_" + nameof(IRedisNativeClient.DbSize): + case "get_" + nameof(IRedisNativeClient.LastSave): + case "get_" + nameof(IRedisNativeClient.Info): + fromPropertyToMethod = true; + preserve = false; + break; + case "set_" + nameof(IRedisNativeClient.Db): + name = nameof(IRedisNativeClientAsync.SelectAsync); + parameters[0] = 
parameters[0].WithName("db"); + fullyHandled = true; + break; + case "set_" + nameof(IRedisClientAsync.Hashes): + case "set_" + nameof(IRedisClientAsync.Lists): + case "set_" + nameof(IRedisClientAsync.Sets): + case "set_" + nameof(IRedisClientAsync.SortedSets): + return; // no "set" included + case "get_Item": + case "set_Item": + return; // no indexer + } + } + + if (fromPropertyToMethod) + { + name = name switch + { + "get_ItemAsync" => "ElementAtAsync", + "set_ItemAsync" => "SetValueAsync", + _ => name.Substring(4), // don't worry about the remove, that isn't in this catchment + }; + } + else if (preserve && !fullyHandled) + { // just keep it the same by default + name = tok.Name; + returnType = SwapForAsyncIfNeedeed(tok.ReturnType); + addCancellation = false; + } + + else if (fullyHandled) { } + else + { + addCancellation = false; + } + } + + static Type ActionDelegateToFunc(Type type) + { + if (type.IsGenericType) + { + var genDef = type.GetGenericTypeDefinition(); + var targs = type.GetGenericArguments(); + Array.Resize(ref targs, targs.Length + 1); + targs[targs.Length - 1] = typeof(ValueTask); + return Expression.GetFuncType(targs); + } + return type; + } + + if (asyncInterface == typeof(IRedisQueueCompletableOperationAsync) && parameters.Length == 1) + { + // very unusual case; Func => Func> + returnType = typeof(void); + ref ParameterToken p = ref parameters[0]; + if (p.ParameterType == typeof(Action)) + { + p = p.WithParameterType(typeof(Func)); + } + else + { + p = p.WithParameterType(typeof(Func<,>).MakeGenericType( + typeof(CancellationToken), typeof(ValueTask<>).MakeGenericType(p.ParameterType.GetGenericArguments()))); + } + tok = new MethodToken(name, returnType, parameters, tok.IsGenericMethod, tok.IsGenericMethodDefinition, tok.GetGenericArguments(), tok.AllAttributes()); + expected.Add(GetSignature(tok)); + } + else if (asyncInterface == typeof(IRedisQueueableOperationAsync) || asyncInterface == typeof(IRedisTypedQueueableOperationAsync<>)) + { + // very unusual case; Func => Func> + if (parameters.Length != 3) return; // move to optionals rather than overloads + ref ParameterToken p = ref parameters[0]; // fixup the delegate type + if (p.ParameterType.IsGenericType) + { + var genDef = p.ParameterType.GetGenericTypeDefinition(); + Type[] funcTypes = p.ParameterType.GetGenericArguments(); + funcTypes[0] = SwapForAsyncIfNeedeed(funcTypes[0]); + + if (genDef == typeof(Action<>)) + { + Array.Resize(ref funcTypes, funcTypes.Length + 1); + funcTypes[funcTypes.Length - 1] = typeof(ValueTask); + } + else + { + funcTypes[funcTypes.Length - 1] = typeof(ValueTask<>) + .MakeGenericType(funcTypes[funcTypes.Length - 1]); + } + + p = p.WithParameterType(typeof(Func<,>).MakeGenericType(funcTypes)); + } + + // make the other parameters optional + p = ref parameters[1]; + p = p.WithAttributes(p.Attributes | ParameterAttributes.Optional); + p = ref parameters[2]; + p = p.WithAttributes(p.Attributes | ParameterAttributes.Optional); + returnType = typeof(void); + name = method.Name; // retain the original name + + tok = new MethodToken(name, returnType, parameters, tok.IsGenericMethod, tok.IsGenericMethodDefinition, tok.GetGenericArguments(), tok.AllAttributes()); + expected.Add(GetSignature(tok)); + } + else + { + for (int i = 0; i < parameters.Length; i++) + { + ref ParameterToken p = ref parameters[i]; + Type type = p.ParameterType, swapped = SwapForAsyncIfNeedeed(type); + if (type != swapped) + { + p = p.WithParameterType(swapped); + } + } + + static bool IsParams(in MethodToken tok) + 
{ + var ps = tok.GetParameters(); + if (ps is null || ps.Length == 0) return false; + return ps.Last().IsDefined(typeof(ParamArrayAttribute)); + } + + if (IsParams(tok)) + { + // include it with params but without CancellationToken + tok = new MethodToken(name, returnType, parameters, tok.IsGenericMethod, tok.IsGenericMethodDefinition, tok.GetGenericArguments(), tok.AllAttributes()); + expected.Add(GetSignature(tok)); + + // and now remove the params so we can get with CancellationToken + ref ParameterToken p = ref parameters[parameters.Length - 1]; + p = p.WithAllAttributes(p.AllAttributes().Where(a => !(a is ParamArrayAttribute)).ToArray()); + } + + if (asyncInterface == typeof(IDistributedLockAsync) && name == nameof(IDistributedLockAsync.LockAsync)) + { + // can't use "out", so uses a new LockState type instead + returnType = typeof(ValueTask); + parameters = RemoveByRef(parameters); + + static ParameterToken[] RemoveByRef(ParameterToken[] parameters) + { + if (parameters.Any(x => x.ParameterType.IsByRef)) + { + parameters = parameters.Where(x => !x.ParameterType.IsByRef).ToArray(); + } + return parameters; + } + } + if (asyncInterface == typeof(IRedisNativeClientAsync)) + { + switch (tok.Name) + { + case nameof(IRedisNativeClient.DecrBy): + case nameof(IRedisNativeClient.IncrBy): + parameters[1] = parameters[1].WithParameterType(typeof(long)); + returnType = typeof(ValueTask); + break; + case nameof(IRedisNativeClient.Shutdown): + Insert(ref parameters, 0, new ParameterToken("noSave", typeof(bool), ParameterAttributes.Optional, false)); + break; + case nameof(IRedisNativeClient.Set): + Insert(ref parameters, 2, new ParameterToken("expirySeconds", typeof(long), ParameterAttributes.Optional, 0)); + Insert(ref parameters, 3, new ParameterToken("expiryMilliseconds", typeof(long), ParameterAttributes.Optional, 0)); + break; + } + + static void Insert(ref ParameterToken[] parameters, int index, ParameterToken value) + { + // don't try to be clever; this is inefficient but correct + var list = parameters.ToList(); + list.Insert(index, value); + parameters = list.ToArray(); + } + } + + if (asyncInterface == typeof(IRedisSubscriptionAsync) && tok.Name == "get_" + nameof(IRedisSubscription.SubscriptionCount)) + { + // this is a purely client value; don't treat as async + name = tok.Name; + returnType = tok.ReturnType; + } + + if (asyncInterface == typeof(IRedisClientAsync) || asyncInterface == typeof(IRedisTypedClientAsync<>)) + { + switch (tok.Name) + { + case nameof(IRedisClient.UrnKey): + case nameof(IRedisClient.As): + addCancellation = false; + name = tok.Name; + returnType = SwapForAsyncIfNeedeed(tok.ReturnType); + break; + case nameof(IRedisClient.Save): // to avoid AsyncAsync and overloaded meaning of Async + name = nameof(IRedisClientAsync.ForegroundSaveAsync); + break; + case nameof(IRedisClient.SaveAsync): // to avoid AsyncAsync and overloaded meaning of Async + name = nameof(IRedisClientAsync.BackgroundSaveAsync); + break; + case nameof(IRedisClient.RewriteAppendOnlyFileAsync): // for consistency + name = nameof(IRedisClientAsync.BackgroundRewriteAppendOnlyFileAsync); + break; + case nameof(IRedisClient.ExecCachedLua): + // Func scriptSha1 => Func> scriptSha1 + parameters[1] = parameters[1].WithParameterType(typeof(Func<,>).MakeGenericType(typeof(string), typeof(ValueTask<>).MakeGenericType(method.GetGenericArguments()))); + break; + case nameof(IRedisClient.AcquireLock) when asyncInterface == typeof(IRedisClientAsync): + if (parameters.Length != 2) return; // 2 overloads combined 
into 1 + parameters[1] = parameters[1].AsNullable().AsOptional(); + returnType = typeof(ValueTask<>).MakeGenericType(returnType); // add await for acquisition + break; + case nameof(IRedisClient.AcquireLock) when asyncInterface == typeof(IRedisTypedClientAsync<>): + if (parameters.Length != 1) return; // 2 overloads combined into 1 + parameters[0] = parameters[0].AsNullable().AsOptional(); + returnType = typeof(ValueTask<>).MakeGenericType(returnType); // add await for acquisition + break; + case nameof(IRedisClient.SetValueIfExists) when asyncInterface == typeof(IRedisClientAsync): + case nameof(IRedisClient.SetValueIfNotExists) when asyncInterface == typeof(IRedisClientAsync): + if (parameters.Length != 3) return; // 2 overloads combined into 1 + parameters[2] = parameters[2].AsNullable().AsOptional(); + break; + case nameof(IRedisClient.CreatePipeline): + case nameof(IRedisTypedClient.GetHash): + addCancellation = false; + name = tok.Name; + returnType = SwapForAsyncIfNeedeed(tok.ReturnType); + break; + } + } + + for (int i = 0; i < parameters.Length; i++) + { + ref ParameterToken p = ref parameters[i]; + var type = p.ParameterType; + if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(Dictionary<,>)) + { + // prefer IDictionary<,> to Dictionary<,> + p = p.WithParameterType(typeof(IDictionary<,>).MakeGenericType(type.GetGenericArguments())); + } + } + + // append optional CancellationToken + if (addCancellation) + { + Array.Resize(ref parameters, parameters.Length + 1); + parameters[parameters.Length - 1] = cancellationParameter; + } + tok = new MethodToken(name, returnType, parameters, tok.IsGenericMethod, tok.IsGenericMethodDefinition, tok.GetGenericArguments(), tok.AllAttributes()); + expected.Add(GetSignature(tok)); + } + } + + actual.Sort(); + expected.Sort(); + int extra = 0, missing = 0, match = 0; + Log($"actual: {actual.Count}, expected: {expected.Count}"); + foreach (var method in actual.Except(expected)) + { + Log($"+ {method}"); + extra++; + } + foreach (var method in expected.Except(actual)) + { + Log($"- {method}"); + missing++; + } + foreach (var method in expected.Intersect(actual)) + { + Log($"= {method}"); + match++; + } + Assert.True(extra == 0 && missing == 0, $"signature mismatch on {GetCSharpTypeName(asyncInterface)}; missing: {missing}, extra: {extra}, match: {match}"); + + + static Type SwapForAsyncIfNeedeed(Type type) + { + if (type.IsArray) + { + var t = type.GetElementType(); + var swapped = SwapForAsyncIfNeedeed(t); + if (t != swapped) + { + var rank = type.GetArrayRank(); + return swapped.MakeArrayType(rank); + } + return type; + } + if (type == typeof(IRedisClient)) return typeof(IRedisClientAsync); + if (type == typeof(ICacheClient)) return typeof(ICacheClientAsync); + if (type == typeof(IRedisPipeline)) return typeof(IRedisPipelineAsync); + if (type == typeof(IRedisPipelineShared)) return typeof(IRedisPipelineSharedAsync); + if (type == typeof(IDisposable)) return typeof(IAsyncDisposable); + if (type == typeof(IRedisList)) return typeof(IRedisListAsync); + if (type == typeof(IRedisSet)) return typeof(IRedisSetAsync); + if (type == typeof(IRedisSortedSet)) return typeof(IRedisSortedSetAsync); + if (type == typeof(IRedisHash)) return typeof(IRedisHashAsync); + if (type == typeof(IRedisSubscription)) return typeof(IRedisSubscriptionAsync); + if (type == typeof(IRedisTransaction)) return typeof(IRedisTransactionAsync); + + if (type.IsGenericType) + { + var genDef = type.GetGenericTypeDefinition(); + var targs = type.GetGenericArguments(); + 
for (int i = 0; i < targs.Length; i++) + targs[i] = SwapForAsyncIfNeedeed(targs[i]); + + if (genDef == typeof(IRedisTypedClient<>)) return typeof(IRedisTypedClientAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisList<>)) return typeof(IRedisListAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisSet<>)) return typeof(IRedisSetAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisSortedSet<>)) return typeof(IRedisSortedSetAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisTypedTransaction<>)) return typeof(IRedisTypedTransactionAsync<>).MakeGenericType(targs); + if (genDef == typeof(IRedisHash<,>)) return typeof(IRedisHashAsync<,>).MakeGenericType(targs); + if (genDef == typeof(IRedisTypedPipeline<>)) return typeof(IRedisTypedPipelineAsync<>).MakeGenericType(targs); + + return genDef.MakeGenericType(targs); + } + + return type; + } + } + + static string GetCSharpTypeName(Type type) + { + if (!(type.IsGenericType || type.IsArray)) + { + return GetSimpleCSharpTypeName(type); + } + var sb = new StringBuilder(); + AppendCSharpTypeName(type, sb); + return sb.ToString(); + } + static string GetSimpleCSharpTypeName(Type type) + { + if (type == typeof(void)) return "void"; + if (type == typeof(bool)) return "bool"; + if (type == typeof(sbyte)) return "sbyte"; + if (type == typeof(short)) return "short"; + if (type == typeof(int)) return "int"; + if (type == typeof(long)) return "long"; + if (type == typeof(byte)) return "byte"; + if (type == typeof(ushort)) return "ushort"; + if (type == typeof(uint)) return "uint"; + if (type == typeof(ulong)) return "ulong"; + if (type == typeof(string)) return "string"; + if (type == typeof(double)) return "double"; + if (type == typeof(float)) return "float"; + if (type == typeof(object)) return "object"; + + return type.Name; + } + + static void AppendCSharpTypeName(Type type, StringBuilder sb) + { + if (type.IsArray) + { + // we won't worry about the difference between vector and non-vector rank zero arrays + AppendCSharpTypeName(type.GetElementType(), sb); + sb.Append("[").Append(',', type.GetArrayRank() - 1).Append("]"); + } + else if (type.IsGenericParameter) + { + sb.Append(type.Name); + } + else if (type.IsGenericType) + { + var nullable = Nullable.GetUnderlyingType(type); + if (nullable is object) + { + AppendCSharpTypeName(nullable, sb); + sb.Append("?"); + } + else + { + var name = type.Name; + int i = name.IndexOf('`'); + if (i < 0) + { + sb.Append(name); + } + else + { + sb.Append(name, 0, i); + } + sb.Append("<"); + var targs = type.GetGenericArguments(); + for (i = 0; i < targs.Length; i++) + { + if (i != 0) sb.Append(", "); + sb.Append(GetCSharpTypeName(targs[i])); + } + sb.Append(">"); + } + } + else + { + sb.Append(GetSimpleCSharpTypeName(type)); + } + } + static string GetSignature(MethodToken method) + { + var sb = new StringBuilder(); + AppendCSharpTypeName(method.ReturnType, sb); + sb.Append(' ').Append(method.Name); + if (method.IsGenericMethodDefinition) + { + sb.Append('<'); + var args = method.GetGenericArguments(); + for (int i = 0; i < args.Length; i++) + { + if (i != 0) sb.Append(", "); + sb.Append(args[i].Name); + } + sb.Append('>'); + } + sb.Append('('); + var ps = method.GetParameters(); + for (int i = 0; i < ps.Length; i++) + { + var p = ps[i]; + if (i != 0) sb.Append(", "); + if (p.IsDefined(typeof(ParamArrayAttribute))) + { + sb.Append("params "); + } + if (p.ParameterType.IsByRef) + { + const ParameterAttributes InOut = ParameterAttributes.In | ParameterAttributes.Out; + 
sb.Append((p.Attributes & InOut) switch + { + ParameterAttributes.In => "in", + ParameterAttributes.Out => "out", + _ => "ref" + }).Append(' '); + AppendCSharpTypeName(p.ParameterType.GetElementType(), sb); + } + else + { + AppendCSharpTypeName(p.ParameterType, sb); + } + sb.Append(' ').Append(p.Name); + if ((p.Attributes & ParameterAttributes.Optional) == ParameterAttributes.Optional) + { + sb.Append(" = "); + switch (p.DefaultValue) + { + case null: + case DBNull _: // used for delegates, honest! + sb.Append("default"); + break; + case string s: + sb.Append(@"""").Append(s.Replace(@"""", @"""""")).Append(@""""); + break; + case object o: + sb.Append(Convert.ToString(o, CultureInfo.InvariantCulture)); + break; + } + } + } + return sb.Append(')').ToString(); + } + + readonly struct ParameterToken + { + public bool IsDefined(Type attributeType) + => _allAttributes.Any(a => attributeType.IsAssignableFrom(a.GetType())); + public object DefaultValue { get; } + public ParameterAttributes Attributes { get; } + public string Name { get; } + public Type ParameterType { get; } + private readonly object[] _allAttributes; + public object[] AllAttributes() => MethodToken.Clone(_allAttributes); + + internal ParameterToken WithAllAttributes(params object[] allAttributes) + => new ParameterToken(Name, ParameterType, Attributes, DefaultValue, allAttributes); + + internal ParameterToken WithParameterType(Type parameterType) + => new ParameterToken(Name, parameterType, Attributes, DefaultValue, _allAttributes); + + internal ParameterToken AsNullable() + { + if (!ParameterType.IsValueType) return this; // already nullable (ish) + var existing = Nullable.GetUnderlyingType(ParameterType); + if (existing is object) return this; // already nullable + return WithParameterType(typeof(Nullable<>).MakeGenericType(ParameterType)); + } + + internal ParameterToken AsOptional() + => WithAttributes(Attributes | ParameterAttributes.Optional); + + internal ParameterToken WithAttributes(ParameterAttributes attributes) + => new ParameterToken(Name, ParameterType, attributes, DefaultValue, _allAttributes); + + internal ParameterToken WithName(string name) + => new ParameterToken(name, ParameterType, Attributes, DefaultValue, _allAttributes); + + public ParameterToken(ParameterInfo source) + { + Name = source.Name; + ParameterType = source.ParameterType; + Attributes = source.Attributes; + DefaultValue = source.DefaultValue; + _allAttributes = source.AllAttributes(); + } + + public ParameterToken(string name, Type parameterType, ParameterAttributes attributes, object defaultValue = default, params object[] allAttributes) + { + Name = name; + ParameterType = parameterType; + Attributes = attributes; + DefaultValue = defaultValue; + _allAttributes = allAttributes ?? 
Array.Empty<object>(); + } + } + + readonly struct MethodToken + { + private readonly ParameterToken[] _parameters; + private readonly Type[] _genericArguments; + private readonly object[] _allAttributes; + public bool IsDefined(Type attributeType) + => _allAttributes.Any(a => attributeType.IsAssignableFrom(a.GetType())); + internal static T[] Clone<T>(T[] source) + { + if (source is null) return null; + var result = new T[source.Length]; + source.CopyTo(result, 0); + return result; + } + public ParameterToken[] GetParameters() => Clone(_parameters); + public Type[] GetGenericArguments() => Clone(_genericArguments); + public string Name { get; } + public bool IsGenericMethodDefinition { get; } + public bool IsGenericMethod { get; } + public Type ReturnType { get; } + public object[] AllAttributes() => Clone(_allAttributes); + public MethodToken(MethodInfo source) + { + Name = source.Name; + IsGenericMethod = source.IsGenericMethod; + IsGenericMethodDefinition = source.IsGenericMethodDefinition; + ReturnType = source.ReturnType; + _genericArguments = (source.IsGenericMethod || source.IsGenericMethodDefinition) + ? source.GetGenericArguments() : null; + var ps = source.GetParameters(); + _parameters = ps is null ? null : Array.ConvertAll(ps, p => new ParameterToken(p)); + _allAttributes = source.AllAttributes(); + } + + public MethodToken(string name, Type returnType, ParameterToken[] parameters, + bool isGenericMethod, bool isGenericMethodDefinition, Type[] genericArguments, + params object[] allAttributes) + { + Name = name; + ReturnType = returnType; + IsGenericMethod = isGenericMethod; + IsGenericMethodDefinition = isGenericMethodDefinition; + _genericArguments = genericArguments; + _parameters = parameters ?? Array.Empty<ParameterToken>(); + _allAttributes = allAttributes ??
Array.Empty<object>(); + } + } + + [TestCase(typeof(ICacheClient), typeof(ICacheClientAsync))] + [TestCase(typeof(ICacheClientExtended), typeof(ICacheClientAsync), typeof(BasicRedisClientManager))] // duplicate not an error; APIs are coalesced + [TestCase(typeof(IEntityStore), typeof(IEntityStoreAsync))] + [TestCase(typeof(IEntityStore<>), typeof(IEntityStoreAsync<>))] + [TestCase(typeof(IRedisClient), typeof(IRedisClientAsync))] + + [TestCase(typeof(IRedisClientsManager), typeof(IRedisClientsManagerAsync))] + [TestCase(typeof(IRedisNativeClient), typeof(IRedisNativeClientAsync))] + [TestCase(typeof(IRedisPipeline), typeof(IRedisPipelineAsync))] + [TestCase(typeof(IRedisPipelineShared), typeof(IRedisPipelineSharedAsync))] + [TestCase(typeof(IRedisQueueableOperation), typeof(IRedisQueueableOperationAsync))] + + [TestCase(typeof(IRedisQueueCompletableOperation), typeof(IRedisQueueCompletableOperationAsync))] + [TestCase(typeof(IRedisTransaction), typeof(IRedisTransactionAsync))] + [TestCase(typeof(IRedisTransactionBase), typeof(IRedisTransactionBaseAsync))] + [TestCase(typeof(IRedisTypedClient<>), typeof(IRedisTypedClientAsync<>))] + [TestCase(typeof(IRemoveByPattern), typeof(IRemoveByPatternAsync))] + + [TestCase(typeof(IDistributedLock), typeof(IDistributedLockAsync))] + [TestCase(typeof(IRedisSubscription), typeof(IRedisSubscriptionAsync))] + [TestCase(typeof(IRedisHash), typeof(IRedisHashAsync))] + [TestCase(typeof(IRedisSortedSet), typeof(IRedisSortedSetAsync))] + [TestCase(typeof(IRedisSet), typeof(IRedisSetAsync))] + + [TestCase(typeof(IRedisList), typeof(IRedisListAsync))] + [TestCase(typeof(IRedisHash<,>), typeof(IRedisHashAsync<,>))] + [TestCase(typeof(IRedisSortedSet<>), typeof(IRedisSortedSetAsync<>))] + [TestCase(typeof(IRedisSet<>), typeof(IRedisSetAsync<>))] + [TestCase(typeof(IRedisList<>), typeof(IRedisListAsync<>))] + + [TestCase(typeof(IRedisTypedPipeline<>), typeof(IRedisTypedPipelineAsync<>))] + [TestCase(typeof(IRedisTypedQueueableOperation<>), typeof(IRedisTypedQueueableOperationAsync<>))] + [TestCase(typeof(IRedisTypedTransaction<>), typeof(IRedisTypedTransactionAsync<>))] + public void TestFullyImplemented(Type syncInterface, Type asyncInterface, params Type[] ignore) + { + HashSet<Type> except = new HashSet<Type>(ignore ?? Type.EmptyTypes); +#if NET472 // only exists there!
+ if (syncInterface == typeof(IRedisClientsManager)) + { + except.Add(typeof(ServiceStack.Redis.Support.Diagnostic.TrackingRedisClientsManager)); + } +#endif + + var syncTypes = AllTypes.Except(except).Where(x => Implements(x, syncInterface)).ToArray(); + DumpTypes(syncInterface, syncTypes); + + var asyncTypes = AllTypes.Except(except).Where(x => Implements(x, asyncInterface)).ToArray(); + DumpTypes(asyncInterface, asyncTypes); + Assert.AreEqual(syncTypes, asyncTypes); + } + + static void DumpTypes(Type @interface, Type[] classes) + { + TestContext.Out.WriteLine($"Classes that implement {@interface.Name}: {classes.Length}:"); + foreach (var @class in classes) + { + TestContext.Out.WriteLine($" {@class.FullName}"); + } + TestContext.Out.WriteLine(); + } + + static bool Implements(Type @class, Type @interface) + { + if (@interface.IsGenericTypeDefinition) + { + var found = (from iType in @class.GetInterfaces() + where iType.IsGenericType + && iType.GetGenericTypeDefinition() == @interface + select iType).SingleOrDefault(); + return found != null && found.IsAssignableFrom(@class); + } + return @interface.IsAssignableFrom(@class); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/BasicRediscClientManagerTests.Async.cs b/tests/ServiceStack.Redis.Tests/BasicRediscClientManagerTests.Async.cs new file mode 100644 index 00000000..70685fd4 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/BasicRediscClientManagerTests.Async.cs @@ -0,0 +1,43 @@ +using NUnit.Framework; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + public class BasicRediscClientManagerTestsAsync + : RedisClientTestsBaseAsync + { + [Test] + public async Task Can_select_db() + { + var redisManager = new BasicRedisClientManager("127.0.0.1"); + + await using (var client = await redisManager.GetClientAsync()) + { + await client.SelectAsync(2); + await client.SetAsync("db", 2); + } + + await using (var client = await redisManager.GetClientAsync()) + { + await client.SelectAsync(3); + await client.SetAsync("db", 3); + } + + await using (var client = await redisManager.GetClientAsync()) + { + await client.SelectAsync(2); + //((RedisClient)client).ChangeDb(2); + var db = await client.GetAsync("db"); + Assert.That(db, Is.EqualTo(2)); + } + + redisManager = new BasicRedisClientManager("127.0.0.1?db=3"); + await using (var client = await redisManager.GetClientAsync()) + { + var db = await client.GetAsync("db"); + Assert.That(db, Is.EqualTo(3)); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Benchmarks/DoubleSerializationBenchmarks.cs b/tests/ServiceStack.Redis.Tests/Benchmarks/DoubleSerializationBenchmarks.cs index 8eeffb28..aa4598b1 100644 --- a/tests/ServiceStack.Redis.Tests/Benchmarks/DoubleSerializationBenchmarks.cs +++ b/tests/ServiceStack.Redis.Tests/Benchmarks/DoubleSerializationBenchmarks.cs @@ -8,7 +8,7 @@ namespace ServiceStack.Redis.Tests.Benchmarks { - [TestFixture, Explicit] + [TestFixture, Ignore("Benchmark")] public class DoubleSerializationBenchmarks { const int times = 100000; diff --git a/tests/ServiceStack.Redis.Tests/CacheClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/CacheClientTests.Async.cs new file mode 100644 index 00000000..e63437a0 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/CacheClientTests.Async.cs @@ -0,0 +1,21 @@ +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + public class CacheClientTestsAsync + { + IRedisClientsManagerAsync 
redisManager = new RedisManagerPool(TestConfig.SingleHost); + + [Test] + public async Task Can_get_set_CacheClient_Async() + { + await using var cache = await redisManager.GetCacheClientAsync(); + await cache.FlushAllAsync(); + + await cache.SetAsync("key", "A"); + var result = await cache.GetAsync("key"); + Assert.That(result, Is.EqualTo("A")); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/CacheClientTests.cs b/tests/ServiceStack.Redis.Tests/CacheClientTests.cs new file mode 100644 index 00000000..4d6f5b3d --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/CacheClientTests.cs @@ -0,0 +1,22 @@ +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + public class CacheClientTests + : RedisClientTestsBase + { + IRedisClientsManager redisManager = new RedisManagerPool(TestConfig.SingleHost); + + [Test] + public void Can_get_set_CacheClient() + { + var cache = redisManager.GetCacheClient(); + cache.FlushAll(); + + cache.Set("key", "A"); + var result = cache.Get("key"); + Assert.That(result, Is.EqualTo("A")); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ConfigTests.cs b/tests/ServiceStack.Redis.Tests/ConfigTests.cs index 9d0baac1..cc84948a 100644 --- a/tests/ServiceStack.Redis.Tests/ConfigTests.cs +++ b/tests/ServiceStack.Redis.Tests/ConfigTests.cs @@ -1,18 +1,19 @@ using NUnit.Framework; +using ServiceStack.Text; namespace ServiceStack.Redis.Tests { [TestFixture] public class ConfigTests { - [TestFixtureSetUp] - public void TestFixtureSetUp() + [OneTimeSetUp] + public void OneTimeSetUp() { RedisConfig.VerifyMasterConnections = false; } - [TestFixtureTearDown] - public void TestFixtureTearDown() + [OneTimeTearDown] + public void OneTimeTearDown() { RedisConfig.VerifyMasterConnections = true; } @@ -26,11 +27,14 @@ public void TestFixtureTearDown() [TestCase("host:1?password=pass&client=nunit", "{Host:host,Port:1,Client:nunit,Password:pass}")] [TestCase("host:1?db=2", "{Host:host,Port:1,Db:2}")] [TestCase("host?ssl=true", "{Host:host,Port:6380,Ssl:True}")] + [TestCase("host:6380?ssl=true&password=pass&sslprotocols=Tls12", "{Host:host,Port:6380,Ssl:True,Password:pass,SslProtocols:Tls12}")] [TestCase("host:1?ssl=true", "{Host:host,Port:1,Ssl:True}")] [TestCase("host:1?connectTimeout=1&sendtimeout=2&receiveTimeout=3&idletimeoutsecs=4", "{Host:host,Port:1,ConnectTimeout:1,SendTimeout:2,ReceiveTimeout:3,IdleTimeOutSecs:4}")] [TestCase("redis://nunit:pass@host:1?ssl=true&db=1&connectTimeout=2&sendtimeout=3&receiveTimeout=4&retryTimeout=5&idletimeoutsecs=5&NamespacePrefix=prefix.", "{Host:host,Port:1,Ssl:True,Client:nunit,Password:pass,Db:1,ConnectTimeout:2,SendTimeout:3,ReceiveTimeout:4,RetryTimeout:5,IdleTimeOutSecs:5,NamespacePrefix:prefix.}")] + [TestCase("redis://nunit:pass@host:1?ssl=true&sslprotocols=Tls12&db=1&connectTimeout=2&sendtimeout=3&receiveTimeout=4&retryTimeout=5&idletimeoutsecs=5&NamespacePrefix=prefix.", + "{Host:host,Port:1,Ssl:True,Client:nunit,Password:pass,SslProtocols:Tls12,Db:1,ConnectTimeout:2,SendTimeout:3,ReceiveTimeout:4,RetryTimeout:5,IdleTimeOutSecs:5,NamespacePrefix:prefix.}")] public void Does_handle_different_connection_strings_settings(string connString, string expectedJsv) { var actual = connString.ToRedisEndpoint(); @@ -54,6 +58,7 @@ public void Does_handle_different_connection_strings_settings(string connString, "host:1?ConnectTimeout=1&SendTimeout=2&ReceiveTimeout=3&IdleTimeOutSecs=4")] 
[TestCase("redis://nunit:pass@host:1?ssl=true&db=1&connectTimeout=2&sendtimeout=3&receiveTimeout=4&idletimeoutsecs=5&NamespacePrefix=prefix.", "host:1?Client=nunit&Password=pass&Db=1&Ssl=true&ConnectTimeout=2&SendTimeout=3&ReceiveTimeout=4&IdleTimeOutSecs=5&NamespacePrefix=prefix.")] + [TestCase("password@host:6380?ssl=true&sslprotocols=Tls12", "host:6380?Password=password&Ssl=true&SslProtocols=Tls12")] public void Does_Serialize_RedisEndpoint(string connString, string expectedString) { var actual = connString.ToRedisEndpoint(); @@ -63,8 +68,8 @@ public void Does_Serialize_RedisEndpoint(string connString, string expectedStrin [Test] public void Does_set_all_properties_on_Client_using_ClientsManagers() { - var connStr = "redis://nunit:pass@host:1?ssl=true&db=0&connectTimeout=2&sendtimeout=3&receiveTimeout=4&idletimeoutsecs=5&NamespacePrefix=prefix."; - var expected = "{Host:host,Port:1,Ssl:True,Client:nunit,Password:pass,Db:0,ConnectTimeout:2,SendTimeout:3,ReceiveTimeout:4,IdleTimeOutSecs:5,NamespacePrefix:prefix.}" + var connStr = "redis://nunit:pass@host:1?ssl=true&sslprotocols=Tls12&db=0&connectTimeout=2&sendtimeout=3&receiveTimeout=4&idletimeoutsecs=5&NamespacePrefix=prefix."; + var expected = "{Host:host,Port:1,Ssl:True,SslProtocols:Tls12,Client:nunit,Password:pass,Db:0,ConnectTimeout:2,SendTimeout:3,ReceiveTimeout:4,IdleTimeOutSecs:5,NamespacePrefix:prefix.}" .FromJsv(); using (var pooledManager = new RedisManagerPool(connStr)) @@ -81,6 +86,25 @@ public void Does_set_all_properties_on_Client_using_ClientsManagers() } } + [Test] + public void Does_encode_values_when_serializing_to_ConnectionString() + { + var config = new RedisEndpoint + { + Host = "host", + Port = 1, + Password = "p@55W0rd=" + }; + + var connString = config.ToString(); + Assert.That(connString, Is.EqualTo("host:1?Password=p%4055W0rd%3d")); + + var fromConfig = connString.ToRedisEndpoint(); + Assert.That(fromConfig.Host, Is.EqualTo(config.Host)); + Assert.That(fromConfig.Port, Is.EqualTo(config.Port)); + Assert.That(fromConfig.Password, Is.EqualTo(config.Password)); + } + private static void AssertClientManager(IRedisClientsManager redisManager, RedisEndpoint expected) { using (var readWrite = (RedisClient)redisManager.GetClient()) @@ -102,6 +126,7 @@ private static void AssertClient(RedisClient redis, RedisEndpoint expected) Assert.That(redis.Host, Is.EqualTo(expected.Host)); Assert.That(redis.Port, Is.EqualTo(expected.Port)); Assert.That(redis.Ssl, Is.EqualTo(expected.Ssl)); + Assert.That(redis.SslProtocols, Is.EqualTo(expected.SslProtocols)); Assert.That(redis.Client, Is.EqualTo(expected.Client)); Assert.That(redis.Password, Is.EqualTo(expected.Password)); Assert.That(redis.Db, Is.EqualTo(expected.Db)); diff --git a/tests/ServiceStack.Redis.Tests/CultureInfoTests.Async.cs b/tests/ServiceStack.Redis.Tests/CultureInfoTests.Async.cs new file mode 100644 index 00000000..12cc63f6 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/CultureInfoTests.Async.cs @@ -0,0 +1,47 @@ +using NUnit.Framework; +using System.Globalization; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class CultureInfoTestsAsync + : RedisClientTestsBaseAsync + { + private CultureInfo previousCulture = CultureInfo.InvariantCulture; + + [OneTimeSetUp] + public void OneTimeSetUp() + { +#if NETCORE + previousCulture = CultureInfo.CurrentCulture; + CultureInfo.CurrentCulture = new CultureInfo("fr-FR"); +#else + previousCulture = Thread.CurrentThread.CurrentCulture; + 
Thread.CurrentThread.CurrentCulture = new CultureInfo("fr-FR"); + Thread.CurrentThread.CurrentUICulture = new CultureInfo("fr-FR"); +#endif + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { +#if NETCORE + CultureInfo.CurrentCulture = previousCulture; +#else + Thread.CurrentThread.CurrentCulture = previousCulture; +#endif + } + + [Test] + public async Task Can_AddItemToSortedSet_in_different_Culture() + { + await RedisAsync.AddItemToSortedSetAsync("somekey1", "somevalue", 66121.202); + var score = await RedisAsync.GetItemScoreInSortedSetAsync("somekey1", "somevalue"); + + Assert.That(score, Is.EqualTo(66121.202)); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/CultureInfoTests.cs b/tests/ServiceStack.Redis.Tests/CultureInfoTests.cs index 59cb3ff6..897391ff 100644 --- a/tests/ServiceStack.Redis.Tests/CultureInfoTests.cs +++ b/tests/ServiceStack.Redis.Tests/CultureInfoTests.cs @@ -10,8 +10,8 @@ public class CultureInfoTests { private CultureInfo previousCulture = CultureInfo.InvariantCulture; - [TestFixtureSetUp] - public void TestFixtureSetUp() + [OneTimeSetUp] + public void OneTimeSetUp() { #if NETCORE previousCulture = CultureInfo.CurrentCulture; @@ -23,8 +23,8 @@ public void TestFixtureSetUp() #endif } - [TestFixtureTearDown] - public void TestFixtureTearDown() + [OneTimeTearDown] + public void OneTimeTearDown() { #if NETCORE CultureInfo.CurrentCulture = previousCulture; diff --git a/tests/ServiceStack.Redis.Tests/CustomCommandTests.Async.cs b/tests/ServiceStack.Redis.Tests/CustomCommandTests.Async.cs new file mode 100644 index 00000000..3033ed5c --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/CustomCommandTests.Async.cs @@ -0,0 +1,140 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Text; +using System; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class CustomCommandTestsAsync + : RedisClientTestsBaseAsync + { + [Test] + public async Task Can_send_custom_commands() + { + await RedisAsync.FlushAllAsync(); + + RedisText ret; + + ret = await RedisAsync.CustomAsync("SET", "foo", 1); + Assert.That(ret.Text, Is.EqualTo("OK")); + _ = await RedisAsync.CustomAsync(Commands.Set, "bar", "b"); + + ret = await RedisAsync.CustomAsync("GET", "foo"); + Assert.That(ret.Text, Is.EqualTo("1")); + ret = await RedisAsync.CustomAsync(Commands.Get, "bar"); + Assert.That(ret.Text, Is.EqualTo("b")); + + ret = await RedisAsync.CustomAsync(Commands.Keys, "*"); + var keys = ret.GetResults(); + Assert.That(keys, Is.EquivalentTo(new[] { "foo", "bar" })); + + ret = await RedisAsync.CustomAsync("MGET", "foo", "bar"); + var values = ret.GetResults(); + Assert.That(values, Is.EquivalentTo(new[] { "1", "b" })); + + foreach (var x in Enum.GetNames(typeof(DayOfWeek))) + { + await RedisAsync.CustomAsync("RPUSH", "DaysOfWeek", x); + } + + ret = await RedisAsync.CustomAsync("LRANGE", "DaysOfWeek", 1, -2); + + var weekDays = ret.GetResults(); + Assert.That(weekDays, Is.EquivalentTo( + new[] { "Monday", "Tuesday", "Wednesday", "Thursday", "Friday" })); + + ret.PrintDump(); + } + + [Test] + public async Task Can_send_custom_commands_longhand() + { + await RedisAsync.FlushAllAsync(); + + RedisText ret; + + ret = await RedisAsync.CustomAsync(new object[] { "SET", "foo", 1 }); + Assert.That(ret.Text, Is.EqualTo("OK")); + _ = await RedisAsync.CustomAsync(new object[] { Commands.Set, "bar", "b" }); + + ret = await RedisAsync.CustomAsync(new object[] { "GET", "foo" }); + 
Assert.That(ret.Text, Is.EqualTo("1")); + ret = await RedisAsync.CustomAsync(new object[] { Commands.Get, "bar" }); + Assert.That(ret.Text, Is.EqualTo("b")); + + ret = await RedisAsync.CustomAsync(new object[] { Commands.Keys, "*" }); + var keys = ret.GetResults(); + Assert.That(keys, Is.EquivalentTo(new[] { "foo", "bar" })); + + ret = await RedisAsync.CustomAsync(new object[] { "MGET", "foo", "bar" }); + var values = ret.GetResults(); + Assert.That(values, Is.EquivalentTo(new[] { "1", "b" })); + + foreach (var x in Enum.GetNames(typeof(DayOfWeek))) + { + await RedisAsync.CustomAsync(new object[] { "RPUSH", "DaysOfWeek", x }); + } + + ret = await RedisAsync.CustomAsync(new object[] { "LRANGE", "DaysOfWeek", 1, -2 }); + + var weekDays = ret.GetResults(); + Assert.That(weekDays, Is.EquivalentTo( + new[] { "Monday", "Tuesday", "Wednesday", "Thursday", "Friday" })); + + ret.PrintDump(); + } + + [Test] + public async Task Can_send_complex_types_in_Custom_Commands() + { + await RedisAsync.FlushAllAsync(); + + RedisText ret; + + ret = await RedisAsync.CustomAsync("SET", "foo", new Poco { Name = "Bar" }); + Assert.That(ret.Text, Is.EqualTo("OK")); + + ret = await RedisAsync.CustomAsync("GET", "foo"); + var dto = ret.GetResult(); + Assert.That(dto.Name, Is.EqualTo("Bar")); + + foreach (var x in Enum.GetNames(typeof(DayOfWeek))) + await RedisAsync.CustomAsync("RPUSH", "DaysOfWeek", new Poco { Name = x }); + + ret = await RedisAsync.CustomAsync("LRANGE", "DaysOfWeek", 1, -2); + var weekDays = ret.GetResults(); + + Assert.That(weekDays.First().Name, Is.EqualTo("Monday")); + + ret.PrintDump(); + } + + [Test] + public async Task Can_send_complex_types_in_Custom_Commands_longhand() + { + await RedisAsync.FlushAllAsync(); + + RedisText ret; + + ret = await RedisAsync.CustomAsync(new object[] { "SET", "foo", new Poco { Name = "Bar" } }); + Assert.That(ret.Text, Is.EqualTo("OK")); + + ret = await RedisAsync.CustomAsync(new object[] { "GET", "foo" }); + var dto = ret.GetResult(); + Assert.That(dto.Name, Is.EqualTo("Bar")); + + foreach (var x in Enum.GetNames(typeof(DayOfWeek))) + await RedisAsync.CustomAsync(new object[] { "RPUSH", "DaysOfWeek", new Poco { Name = x } }); + + ret = await RedisAsync.CustomAsync(new object[] { "LRANGE", "DaysOfWeek", 1, -2 }); + var weekDays = ret.GetResults(); + + Assert.That(weekDays.First().Name, Is.EqualTo("Monday")); + + ret.PrintDump(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/DiagnosticTests.cs b/tests/ServiceStack.Redis.Tests/DiagnosticTests.cs index fef233e3..5d511398 100644 --- a/tests/ServiceStack.Redis.Tests/DiagnosticTests.cs +++ b/tests/ServiceStack.Redis.Tests/DiagnosticTests.cs @@ -5,7 +5,7 @@ namespace ServiceStack.Redis.Tests { - [Explicit("Diagnostic only Integration Test")] + [Ignore("Diagnostic only Integration Test")] [TestFixture] public class DiagnosticTests { diff --git a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostBestPractice.cs b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostBestPractice.cs index e8f9d58a..1cfa8e05 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostBestPractice.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostBestPractice.cs @@ -4,7 +4,6 @@ using System.Linq; using NUnit.Framework; using ServiceStack.Common; -using ServiceStack.Common; using ServiceStack.Text; namespace ServiceStack.Redis.Tests.Examples.BestPractice @@ -274,7 +273,7 @@ public List Inject(IEnumerable entities) #endregion - [TestFixture, Explicit, 
Category("Integration")] + [TestFixture, Ignore("Integration"), Category("Integration")] public class BlogPostBestPractice { readonly RedisClient redisClient = new RedisClient(TestConfig.SingleHost); diff --git a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostMigrations.cs b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostMigrations.cs index b27e9895..d868ccd2 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostMigrations.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/BestPractice/BlogPostMigrations.cs @@ -52,7 +52,7 @@ public enum BlogPostType namespace ServiceStack.Redis.Tests.Examples.BestPractice { - [TestFixture, Explicit, Category("Integration")] + [TestFixture, Ignore("Integration"), Category("Integration")] public class BlogPostMigrations { readonly RedisClient redisClient = new RedisClient(TestConfig.SingleHost); diff --git a/tests/ServiceStack.Redis.Tests/Examples/BlogPostExample.cs b/tests/ServiceStack.Redis.Tests/Examples/BlogPostExample.cs index 8fd2d591..880fdbe7 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/BlogPostExample.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/BlogPostExample.cs @@ -64,7 +64,7 @@ public class BlogPostComment } - [TestFixture, Explicit, Category("Integration")] + [TestFixture, Ignore("Integration"), Category("Integration")] public class BlogPostExample { readonly RedisClient redis = new RedisClient(TestConfig.SingleHost); diff --git a/tests/ServiceStack.Redis.Tests/Examples/ServiceStack_Redis_UseCase.cs b/tests/ServiceStack.Redis.Tests/Examples/ServiceStack_Redis_UseCase.cs index 895253ae..92cf0e27 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/ServiceStack_Redis_UseCase.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/ServiceStack_Redis_UseCase.cs @@ -4,7 +4,7 @@ namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit] + [TestFixture, Ignore("Integration")] public class ServiceStack_Redis_UseCase { public class Todo diff --git a/tests/ServiceStack.Redis.Tests/Examples/SimpleExamples.cs b/tests/ServiceStack.Redis.Tests/Examples/SimpleExamples.cs index 7cac8860..d225a6c7 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/SimpleExamples.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/SimpleExamples.cs @@ -8,7 +8,7 @@ namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit, Category("Integration")] + [TestFixture, Ignore("Integration"), Category("Integration")] public class SimpleExamples { readonly RedisClient redis = new RedisClient(TestConfig.SingleHost); diff --git a/tests/ServiceStack.Redis.Tests/Examples/SimpleLocks.cs b/tests/ServiceStack.Redis.Tests/Examples/SimpleLocks.cs index 950a4764..4abfc881 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/SimpleLocks.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/SimpleLocks.cs @@ -7,7 +7,7 @@ namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit, Category("Integration")] + [TestFixture, Ignore("Integration"), Category("Integration")] public class SimpleLocks { [SetUp] diff --git a/tests/ServiceStack.Redis.Tests/Examples/SimplePubSub.cs b/tests/ServiceStack.Redis.Tests/Examples/SimplePubSub.cs index 11386d53..207f8d7c 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/SimplePubSub.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/SimplePubSub.cs @@ -6,15 +6,15 @@ namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit, Category("Integration")] + [TestFixture, Ignore("Integration"), Category("Integration")] public class SimplePubSub { const string ChannelName = 
"SimplePubSubCHANNEL"; const string MessagePrefix = "MESSAGE "; const int PublishMessageCount = 5; - [TestFixtureSetUp] - public void TestFixtureSetUp() + [OneTimeSetUp] + public void OneTimeSetUp() { using (var redis = new RedisClient(TestConfig.SingleHost)) { diff --git a/tests/ServiceStack.Redis.Tests/Examples/TestData.cs b/tests/ServiceStack.Redis.Tests/Examples/TestData.cs index 2c242347..9638709f 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/TestData.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/TestData.cs @@ -4,7 +4,7 @@ namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit] + [TestFixture, Ignore("Integration")] public class TestData : RedisClientTestsBase { diff --git a/tests/ServiceStack.Redis.Tests/Examples/TodoApp.cs b/tests/ServiceStack.Redis.Tests/Examples/TodoApp.cs index ede76748..393ddc3e 100644 --- a/tests/ServiceStack.Redis.Tests/Examples/TodoApp.cs +++ b/tests/ServiceStack.Redis.Tests/Examples/TodoApp.cs @@ -2,7 +2,7 @@ namespace ServiceStack.Redis.Tests.Examples { - [TestFixture, Explicit, Category("Integration")] + [TestFixture, Ignore("Integration"), Category("Integration")] public class TodoApp { [SetUp] diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.Async.cs new file mode 100644 index 00000000..1889d193 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsBase.Async.cs @@ -0,0 +1,250 @@ +using System.Collections.Generic; +using NUnit.Framework; +using ServiceStack.Common; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using System.Linq; +using System.Threading.Tasks; +using System; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Async")] + public abstract class RedisClientHashTestsBaseAsync + { + private const string HashId = "testhash"; + + protected abstract IModelFactory Factory { get; } + + private IRedisClientAsync client; + private IRedisTypedClientAsync redis; + private IRedisHashAsync Hash; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + await client.FlushAllAsync(); + + redis = client.As(); + + Hash = redis.GetHash(HashId); + } + + private Dictionary CreateMap() + { + var listValues = Factory.CreateList(); + var map = new Dictionary(); + listValues.ForEach(x => map[x.ToString()] = x); + return map; + } + + private Dictionary CreateMap2() + { + var listValues = Factory.CreateList2(); + var map = new Dictionary(); + listValues.ForEach(x => map[x.ToString()] = x); + return map; + } + + [Test] + public async Task Can_SetItemInHash_and_GetAllFromHash() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_RemoveFromHash() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var firstKey = mapValues.First().Key; + + await redis.RemoveEntryFromHashAsync(Hash, firstKey); + + mapValues.Remove(firstKey); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_GetItemFromHash() + { + var mapValues = CreateMap(); + await 
mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var firstKey = mapValues.First().Key; + + var hashValue = await redis.GetValueFromHashAsync(Hash, firstKey); + + Assert.That(hashValue, Is.EqualTo(mapValues[firstKey])); + } + + [Test] + public async Task Can_GetHashCount() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var hashCount = await redis.GetHashCountAsync(Hash); + + Assert.That(hashCount, Is.EqualTo(mapValues.Count)); + } + + [Test] + public async Task Does_HashContainsKey() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var existingMember = mapValues.First().Key; + var nonExistingMember = existingMember + "notexists"; + + Assert.That(await redis.HashContainsEntryAsync(Hash, existingMember), Is.True); + Assert.That(await redis.HashContainsEntryAsync(Hash, nonExistingMember), Is.False); + } + + [Test] + public async Task Can_GetHashKeys() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var expectedKeys = mapValues.Map(x => x.Key); + + var hashKeys = await redis.GetHashKeysAsync(Hash); + + Assert.That(hashKeys, Is.EquivalentTo(expectedKeys)); + } + + [Test] + public async Task Can_GetHashValues() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var expectedValues = mapValues.Map(x => x.Value); + + var hashValues = await redis.GetHashValuesAsync(Hash); + + Assert.That(hashValues, Is.EquivalentTo(expectedValues)); + } + + [Test] + public async Task Can_enumerate_small_IDictionary_Hash() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var members = new List(); + await foreach (var item in redis.GetHash(HashId)) + { + Assert.That(mapValues.ContainsKey(item.Key), Is.True); + members.Add(item.Key); + } + Assert.That(members.Count, Is.EqualTo(mapValues.Count)); + } + + [Test] + public async Task Can_Add_to_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + await mapValues.ForEachAsync((k, v) => hash.AddAsync(k, v)); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_Clear_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + await mapValues.ForEachAsync((k, v) => hash.AddAsync(k, v)); + + Assert.That(await hash.CountAsync(), Is.EqualTo(mapValues.Count)); + + await hash.ClearAsync(); + + Assert.That(await hash.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + await mapValues.ForEachAsync((k, v) => hash.AddAsync(k, v)); + + var existingMember = mapValues.First().Key; + var nonExistingMember = existingMember + "notexists"; + + Assert.That(await hash.ContainsKeyAsync(existingMember), Is.True); + Assert.That(await hash.ContainsKeyAsync(nonExistingMember), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_IDictionary_Hash() + { + var hash = redis.GetHash(HashId); + var mapValues = CreateMap(); + await mapValues.ForEachAsync((k, v) => hash.AddAsync(k, v)); + + var firstKey = mapValues.First().Key; + 
mapValues.Remove(firstKey); + await hash.RemoveAsync(firstKey); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_SetItemInHashIfNotExists() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var existingMember = mapValues.First().Key; + var nonExistingMember = existingMember + "notexists"; + + var lastValue = mapValues.Last().Value; + + await redis.SetEntryInHashIfNotExistsAsync(Hash, existingMember, lastValue); + await redis.SetEntryInHashIfNotExistsAsync(Hash, nonExistingMember, lastValue); + + mapValues[nonExistingMember] = lastValue; + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + + [Test] + public async Task Can_SetRangeInHash() + { + var mapValues = CreateMap(); + await mapValues.ForEachAsync(async (k, v) => await redis.SetEntryInHashAsync(Hash, k, v)); + + var newMapValues = CreateMap2(); + + await redis.SetRangeInHashAsync(Hash, newMapValues); + + newMapValues.Each(x => mapValues[x.Key] = x.Value); + + var members = await redis.GetAllEntriesFromHashAsync(Hash); + Assert.That(members, Is.EquivalentTo(mapValues)); + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.Async.cs new file mode 100644 index 00000000..0e7d7216 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientHashTestsModels.Async.cs @@ -0,0 +1,80 @@ +using System; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Tests.Support; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisClientHashTestsModelWithFieldsOfDifferentTypesAsync + : RedisClientHashTestsBaseAsync + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientHashTestsStringAsync + : RedisClientHashTestsBaseAsync + { + private readonly IModelFactory factory = new BuiltInsFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientHashTestsShipperAsync + : RedisClientHashTestsBaseAsync + { + private readonly IModelFactory factory = new ShipperFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientHashTestsIntAsync + : RedisClientHashTestsBaseAsync + { + private readonly IModelFactory factory = new IntFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientHashTestsCustomTypeAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + //public class RedisClientHashTestsDateTimeAsync + // : RedisClientHashTestsBaseAsync + //{ + // private readonly IModelFactory factory = new DateTimeFactory(); + + // protected override IModelFactory Factory + // { + // get { return factory; } + // } + //} + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.Async.cs 
b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.Async.cs new file mode 100644 index 00000000..42f92e1e --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestExtra.Async.cs @@ -0,0 +1,59 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using ServiceStack.Redis.Tests.Support; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Async")] + public class RedisClientListTestExtraAsync + { + const string ListId = "testlist"; + // const string ListId2 = "testlist2"; + private IRedisListAsync List; + // private IRedisListAsync List2; + + + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected IModelFactory Factory { get { return factory; } } + + private IRedisClientAsync client; + private IRedisTypedClientAsync redis; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + await client.FlushAllAsync(); + + redis = client.As(); + + List = redis.Lists[ListId]; + // List2 = redis.Lists[ListId2]; + } + + [Test] + public async Task Can_Remove_value_from_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + var equalItem = new CustomType() { CustomId = 4 }; + storeMembers.Remove(equalItem); + await List.RemoveAsync(equalItem); + + var members = await List.ToListAsync(); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + } +} diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.Async.cs new file mode 100644 index 00000000..58e42b5d --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsBase.Async.cs @@ -0,0 +1,331 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Async")] + public abstract class RedisClientListTestsBaseAsync + { + const string ListId = "testlist"; + const string ListId2 = "testlist2"; + private IRedisListAsync List; + private IRedisListAsync List2; + + protected abstract IModelFactory Factory { get; } + + private IRedisClientAsync client; + private IRedisTypedClientAsync redis; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost); + await client.FlushAllAsync(); + + redis = client.As(); + + List = redis.Lists[ListId]; + List2 = redis.Lists[ListId2]; + } + + [Test] + public async Task Can_AddToList_and_GetAllFromList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var members = await redis.GetAllItemsFromListAsync(List); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_GetListCount() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var listCount = await redis.GetListCountAsync(List); + + Assert.That(listCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Can_GetItemFromList() + { + var storeMembers = Factory.CreateList(); + 
await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var storeMember3 = storeMembers[2]; + var item3 = await redis.GetItemFromListAsync(List, 2); + + Factory.AssertIsEqual(item3, storeMember3); + } + + [Test] + public async Task Can_SetItemInList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + storeMembers[2] = Factory.NonExistingValue; + await redis.SetItemInListAsync(List, 2, Factory.NonExistingValue); + + var members = await redis.GetAllItemsFromListAsync(List); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_PopFromList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var lastValue = await redis.PopItemFromListAsync(List); + + Factory.AssertIsEqual(lastValue, storeMembers[storeMembers.Count - 1]); + } + + [Test] + public async Task Can_BlockingDequeueItemFromList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.EnqueueItemOnListAsync(List, x)); + + var item1 = await redis.BlockingDequeueItemFromListAsync(List, new TimeSpan(0, 0, 1)); + + Factory.AssertIsEqual(item1, (T)storeMembers.First()); + } + + [Test] + public async Task Can_BlockingDequeueItemFromList_Timeout() + { + var item1 = await redis.BlockingDequeueItemFromListAsync(List, new TimeSpan(0, 0, 1)); + Assert.AreEqual(item1, default(T)); + } + + [Test] + public async Task Can_DequeueFromList() + { + + var queue = new Queue(); + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => queue.Enqueue(x)); + await storeMembers.ForEachAsync(x => redis.EnqueueItemOnListAsync(List, x)); + + var item1 = await redis.DequeueItemFromListAsync(List); + + Factory.AssertIsEqual(item1, queue.Dequeue()); + } + + [Test] + public async Task PopAndPushSameAsDequeue() + { + var queue = new Queue(); + var storeMembers = Factory.CreateList(); + storeMembers.ForEach(x => queue.Enqueue(x)); + await storeMembers.ForEachAsync(x => redis.EnqueueItemOnListAsync(List, x)); + + var item1 = await redis.PopAndPushItemBetweenListsAsync(List, List2); + Assert.That(item1, Is.EqualTo(queue.Dequeue())); + } + + [Test] + public async Task Can_ClearList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.EnqueueItemOnListAsync(List, x)); + + var count = (await redis.GetAllItemsFromListAsync(List)).Count; + Assert.That(count, Is.EqualTo(storeMembers.Count)); + + await redis.RemoveAllFromListAsync(List); + count = (await redis.GetAllItemsFromListAsync(List)).Count; + Assert.That(count, Is.EqualTo(0)); + + } + + [Test] + public async Task Can_ClearListWithOneItem() + { + var storeMembers = Factory.CreateList(); + await redis.EnqueueItemOnListAsync(List, storeMembers[0]); + + var count = (await redis.GetAllItemsFromListAsync(List)).Count; + Assert.That(count, Is.EqualTo(1)); + + await redis.RemoveAllFromListAsync(List); + count = (await redis.GetAllItemsFromListAsync(List)).Count; + Assert.That(count, Is.EqualTo(0)); + } + + [Test] + public async Task Can_MoveBetweenLists() + { + var list1Members = Factory.CreateList(); + var list2Members = Factory.CreateList2(); + var lastItem = list1Members[list1Members.Count - 1]; + + await list1Members.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + await list2Members.ForEachAsync(x => redis.AddItemToListAsync(List2, x)); + + list1Members.Remove(lastItem); + list2Members.Insert(0, lastItem); + await 
redis.PopAndPushItemBetweenListsAsync(List, List2); + + var readList1 = await redis.GetAllItemsFromListAsync(List); + var readList2 = await redis.GetAllItemsFromListAsync(List2); + + Factory.AssertListsAreEqual(readList1, list1Members); + Factory.AssertListsAreEqual(readList2, list2Members); + } + + + [Test] + public async Task Can_enumerate_small_list() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + var readMembers = new List(); + await foreach (var item in redis.Lists[ListId]) + { + readMembers.Add(item); + } + Factory.AssertListsAreEqual(readMembers, storeMembers); + } + + [Test] + public async Task Can_enumerate_large_list() + { + if (TestConfig.IgnoreLongTests) return; + + const int listSize = 2500; + + await listSize.TimesAsync(x => redis.AddItemToListAsync(List, Factory.CreateInstance(x))); + + var i = 0; + await foreach (var item in List) + { + Factory.AssertIsEqual(item, Factory.CreateInstance(i++)); + } + } + + [Test] + public async Task Can_Add_to_IList() + { + var storeMembers = Factory.CreateList(); + var list = redis.Lists[ListId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + var members = await list.ToListAsync(); + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_Clear_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + Assert.That(await List.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await List.ClearAsync(); + + Assert.That(await List.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + Assert.That(await List.ContainsAsync(Factory.ExistingValue), Is.True); + Assert.That(await List.ContainsAsync(Factory.NonExistingValue), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + storeMembers.Remove(Factory.ExistingValue); + await List.RemoveAsync(Factory.ExistingValue); + + var members = await List.ToListAsync(); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_RemoveAt_value_from_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + storeMembers.RemoveAt(2); + await List.RemoveAtAsync(2); + + var members = await List.ToListAsync(); + + Factory.AssertListsAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_get_default_index_from_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + for (var i = 0; i < storeMembers.Count; i++) + { + Factory.AssertIsEqual(await List.ElementAtAsync(i), storeMembers[i]); + } + } + + [Test] + public async Task Can_test_for_IndexOf_in_IList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => List.AddAsync(x)); + + foreach (var item in storeMembers) + { + Assert.That(await List.IndexOfAsync(item), Is.EqualTo(storeMembers.IndexOf(item))); + } + } + + + [Test] + public async Task Can_GetRangeFromList() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(x => redis.AddItemToListAsync(List, x)); + + //in SetUp(): List = redis.Lists["testlist"]; + //alias for: redis.GetRangeFromList(redis.Lists["testlist"], 1, 3); + var 
range = await List.GetRangeAsync(1, 3); + var expected = storeMembers.Skip(1).Take(3).ToList(); + + //Uncomment to view list contents + //Debug.WriteLine(range.Dump()); + //Debug.WriteLine(expected.Dump()); + + Factory.AssertListsAreEqual(range, expected); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.Async.cs new file mode 100644 index 00000000..00d6e80f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.Async.cs @@ -0,0 +1,91 @@ +using System; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Tests.Support; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisClientListTestsModelWithFieldsOfDifferentTypesAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientListTestsStringAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = new BuiltInsFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientListTestsShipperAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = new ShipperFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientListTestsIntAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = new IntFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientListTestsCustomTypeAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientlistTestCustomType_FailingAsync + : RedisClientListTestsBaseAsync + { + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + //public class RedisClientListTestsDateTimeAsync + // : RedisClientListTestsBaseAsync + //{ + // private readonly IModelFactory factory = new DateTimeFactory(); + + // protected override IModelFactory Factory + // { + // get { return factory; } + // } + //} +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.cs index d8c0be73..5d8e0474 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientListTestsModels.cs @@ -5,6 +5,7 @@ namespace ServiceStack.Redis.Tests.Generic { + // TODO: error, missing fixture? 
public class RedisClientListTestsModelWithFieldsOfDifferentTypes : RedisClientListTestsBase { diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.Async.cs new file mode 100644 index 00000000..7f3028fd --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsBase.Async.cs @@ -0,0 +1,344 @@ +using System.Collections.Generic; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using System.Linq; +using System.Threading.Tasks; +using System; + +namespace ServiceStack.Redis.Tests.Generic +{ + [Category("Async")] + public abstract class RedisClientSetTestsBaseAsync + { + private const string SetId = "testset"; + private const string SetId2 = "testset2"; + private const string SetId3 = "testset3"; + protected abstract IModelFactory Factory { get; } + + private IRedisClientAsync client; + private IRedisTypedClientAsync redis; + private IRedisSetAsync Set; + private IRedisSetAsync Set2; + private IRedisSetAsync Set3; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + await client.FlushAllAsync(); + + redis = client.As(); + + Set = redis.Sets[SetId]; + Set2 = redis.Sets[SetId2]; + Set3 = redis.Sets[SetId3]; + } + + [Test] + public async Task Can_AddToSet_and_GetAllFromSet() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var members = await redis.GetAllItemsFromSetAsync(Set); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_RemoveFromSet() + { + var storeMembers = Factory.CreateList(); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + await redis.RemoveItemFromSetAsync(Set, Factory.ExistingValue); + + storeMembers.Remove(Factory.ExistingValue); + + var members = await redis.GetAllItemsFromSetAsync(Set); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_PopFromSet() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var member = await redis.PopItemFromSetAsync(Set); + + Assert.That(storeMembers.Contains(member), Is.True); + } + + [Test] + public async Task Can_MoveBetweenSets() + { + var fromSetMembers = Factory.CreateList(); + var toSetMembers = Factory.CreateList2(); + + await fromSetMembers.ForEachAsync(x => redis.AddItemToSetAsync(Set, x)); + await toSetMembers.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + await redis.MoveBetweenSetsAsync(Set, Set2, Factory.ExistingValue); + + fromSetMembers.Remove(Factory.ExistingValue); + toSetMembers.Add(Factory.ExistingValue); + + var readFromSetId = await redis.GetAllItemsFromSetAsync(Set); + var readToSetId = await redis.GetAllItemsFromSetAsync(Set2); + + Assert.That(readFromSetId, Is.EquivalentTo(fromSetMembers)); + Assert.That(readToSetId, Is.EquivalentTo(toSetMembers)); + } + + [Test] + public async Task Can_GetSetCount() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var setCount = await redis.GetSetCountAsync(Set); + + Assert.That(setCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Does_SetContainsValue() + { + var storeMembers 
= Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + Assert.That(await redis.SetContainsItemAsync(Set, Factory.ExistingValue), Is.True); + Assert.That(await redis.SetContainsItemAsync(Set, Factory.NonExistingValue), Is.False); + } + + [Test] + public async Task Can_IntersectBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); + + storeMembers.Add(storeMembers2.First()); + storeMembers2.Add(storeMembers.First()); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + var intersectingMembers = await redis.GetIntersectFromSetsAsync(new[] { Set, Set2 }); + + var intersect = (await Set.ToListAsync()).Intersect((await Set2.ToListAsync())).ToList(); + + Assert.That(intersectingMembers, Is.EquivalentTo(intersect)); + } + + [Test] + public async Task Can_Store_IntersectBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + await redis.StoreIntersectFromSetsAsync(Set3, new[] { Set, Set2 }); + + var intersect = (await Set.ToListAsync()).Intersect(await Set2.ToListAsync()).ToList(); + + Assert.That(await Set3.ToListAsync(), Is.EquivalentTo(intersect)); + } + + [Test] + public async Task Can_UnionBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + var unionMembers = await redis.GetUnionFromSetsAsync(new[] { Set, Set2 }); + + var union = (await Set.ToListAsync()).Union(await Set2.ToListAsync()).ToList(); + + Assert.That(unionMembers, Is.EquivalentTo(union)); + } + + [Test] + public async Task Can_Store_UnionBetweenSets() + { + var storeMembers = Factory.CreateList(); + var storeMembers2 = Factory.CreateList2(); + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + + await redis.StoreUnionFromSetsAsync(Set3, new[] { Set, Set2 }); + + var union = (await Set.ToListAsync()).Union((await Set2.ToListAsync())).ToList(); + + Assert.That(await Set3.ToListAsync(), Is.EquivalentTo(union)); + } + + [Test] + public async Task Can_DiffBetweenSets() + { + var storeMembers = Factory.CreateList(); + storeMembers.Add(Factory.CreateInstance(1)); + + var storeMembers2 = Factory.CreateList2(); + storeMembers2.Insert(0, Factory.CreateInstance(4)); + + var storeMembers3 = new List { + Factory.CreateInstance(1), + Factory.CreateInstance(5), + Factory.CreateInstance(7), + Factory.CreateInstance(11), + }; + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + await storeMembers3.ForEachAsync(x => redis.AddItemToSetAsync(Set3, x)); + + var diffMembers = await redis.GetDifferencesFromSetAsync(Set, new[] { Set2, Set3 }); + + Assert.That(diffMembers, Is.EquivalentTo( + new List { Factory.CreateInstance(2), Factory.CreateInstance(3) })); + } + + [Test] + public async Task Can_Store_DiffBetweenSets() + { + var storeMembers = Factory.CreateList(); + 
storeMembers.Add(Factory.CreateInstance(1)); + + var storeMembers2 = Factory.CreateList2(); + storeMembers2.Insert(0, Factory.CreateInstance(4)); + + var storeMembers3 = new List { + Factory.CreateInstance(1), + Factory.CreateInstance(5), + Factory.CreateInstance(7), + Factory.CreateInstance(11), + }; + + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + await storeMembers2.ForEachAsync(x => redis.AddItemToSetAsync(Set2, x)); + await storeMembers3.ForEachAsync(x => redis.AddItemToSetAsync(Set3, x)); + + var storeSet = redis.Sets["testdiffsetstore"]; + + await redis.StoreDifferencesFromSetAsync(storeSet, Set, new[] { Set2, Set3 }); + + Assert.That(await storeSet.ToListAsync(), Is.EquivalentTo( + new List { Factory.CreateInstance(2), Factory.CreateInstance(3) })); + + } + + [Test] + public async Task Can_GetRandomEntryFromSet() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var randomEntry = await redis.GetRandomItemFromSetAsync(Set); + + Assert.That(storeMembers.Contains(randomEntry), Is.True); + } + + + [Test] + public async Task Can_enumerate_small_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var members = new List(); + await foreach (var item in Set) + { + members.Add(item); + } + + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_enumerate_large_ICollection_Set() + { + if (TestConfig.IgnoreLongTests) return; + + const int setSize = 2500; + + var storeMembers = new List(); + await setSize.TimesAsync(async x => + { + await redis.AddItemToSetAsync(Set, Factory.CreateInstance(x)); + storeMembers.Add(Factory.CreateInstance(x)); + }); + + var members = new List(); + await foreach (var item in Set) + { + members.Add(item); + } + members.Sort((x, y) => x.GetId().ToString().CompareTo(y.GetId().ToString())); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Add_to_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + var members = await Set.ToListAsync(); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Clear_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + Assert.That(await Set.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await Set.ClearAsync(); + + Assert.That(await Set.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + Assert.That(await Set.ContainsAsync(Factory.ExistingValue), Is.True); + Assert.That(await Set.ContainsAsync(Factory.NonExistingValue), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_ICollection_Set() + { + var storeMembers = Factory.CreateList(); + await storeMembers.ForEachAsync(async x => await redis.AddItemToSetAsync(Set, x)); + + storeMembers.Remove(Factory.ExistingValue); + await Set.RemoveAsync(Factory.ExistingValue); + + var members = await Set.ToListAsync(); + + 
Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.Async.cs new file mode 100644 index 00000000..9eb33a51 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientSetTestsModels.Async.cs @@ -0,0 +1,81 @@ +using System; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Tests.Support; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisClientSetTestsModelWithFieldsOfDifferentTypesAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientSetTestsStringAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new BuiltInsFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientSetTestsShipperAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new ShipperFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientSetTestsIntAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new IntFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + [TestFixture] + public class RedisClientSetTestsCustomTypeAsync + : RedisClientSetTestsBaseAsync + { + private readonly IModelFactory factory = new CustomTypeFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + + //public class RedisClientSetTestsDateTimeAsync + // : RedisClientSetTestsBaseAsync + //{ + // private readonly IModelFactory factory = new DateTimeFactory(); + + // protected override IModelFactory Factory + // { + // get { return factory; } + // } + //} + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.Async.cs new file mode 100644 index 00000000..a553c572 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.Async.cs @@ -0,0 +1,129 @@ +using NUnit.Framework; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Integration"), Category("Async")] + public class RedisClientTestsAsync : RedisClientTestsBaseAsync + { + [OneTimeSetUp] + public void TestFixture() + { + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = "GenericRedisClientTests"; + } + + [Test] + public async Task Can_Set_and_Get_string() + { + const string value = "value"; + await RedisAsync.SetValueAsync("key", value); + var valueString = await RedisAsync.GetValueAsync("key"); + + Assert.That(valueString, Is.EqualTo(value)); + } + + [Test] + public async Task Can_Set_and_Get_key_with_all_byte_values() + { + const string key = "bytesKey"; + + var value = new byte[256]; + for (var i = 0; i < value.Length; i++) + { + value[i] = (byte)i; + } + + await NativeAsync.SetAsync(key, value); + var resultValue = await NativeAsync.GetAsync(key); + + Assert.That(resultValue, 
Is.EquivalentTo(value)); + } + + public List Sort(IEnumerable list) + { + var sortedList = list.ToList(); + sortedList.Sort((x, y) => + x.GetId().ToString().CompareTo(y.GetId().ToString())); + + return sortedList; + } + + [Test] + public async Task Can_SetBit_And_GetBit_And_BitCount() + { + const string key = "BitKey"; + const int offset = 100; + await NativeAsync.SetBitAsync(key, offset, 1); + Assert.AreEqual(1, await NativeAsync.GetBitAsync(key, offset)); + Assert.AreEqual(1, await NativeAsync.BitCountAsync(key)); + } + + public class Dummy + { + public int Id { get; set; } + public string Name { get; set; } + } + + [Test] + public async Task Can_Delete() + { + var dto = new Dummy { Id = 1, Name = "Name" }; + + await RedisAsync.StoreAsync(dto); + + Assert.That((await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + "ids:Dummy")).ToArray()[0], Is.EqualTo("1")); + Assert.That(await RedisAsync.GetByIdAsync(1), Is.Not.Null); + + await RedisAsync.DeleteAsync(dto); + + Assert.That((await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + "ids:Dummy")).Count, Is.EqualTo(0)); + Assert.That(await RedisAsync.GetByIdAsync(1), Is.Null); + } + + [Test] + public async Task Can_DeleteById() + { + var dto = new Dummy { Id = 1, Name = "Name" }; + await RedisAsync.StoreAsync(dto); + + Assert.That((await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + "ids:Dummy")).ToArray()[0], Is.EqualTo("1")); + Assert.That(await RedisAsync.GetByIdAsync(1), Is.Not.Null); + + await RedisAsync.DeleteByIdAsync(dto.Id); + + Assert.That((await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + "ids:Dummy")).Count, Is.EqualTo(0)); + Assert.That(await RedisAsync.GetByIdAsync(1), Is.Null); + } + + [Test] + public async Task Can_save_via_string() + { + var dtos = 10.Times(i => new Dummy { Id = i, Name = "Name" + i }); + + await RedisAsync.SetValueAsync("dummy:strings", dtos.ToJson()); + + var fromDtos = (await RedisAsync.GetValueAsync("dummy:strings")).FromJson>(); + + Assert.That(fromDtos.Count, Is.EqualTo(10)); + } + + [Test] + public async Task Can_save_via_types() + { + var dtos = 10.Times(i => new Dummy { Id = i, Name = "Name" + i }); + + await RedisAsync.SetAsync("dummy:strings", dtos); + + var fromDtos = await RedisAsync.GetAsync>("dummy:strings"); + + Assert.That(fromDtos.Count, Is.EqualTo(10)); + } + } +} diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.cs index c82639de..57f1c238 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisClientTests.cs @@ -8,7 +8,7 @@ namespace ServiceStack.Redis.Tests.Generic [TestFixture, Category("Integration")] public class RedisClientTests : RedisClientTestsBase { - [TestFixtureSetUp] + [OneTimeSetUp] public void TestFixture() { } diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.Async.cs new file mode 100644 index 00000000..92880411 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBase.Async.cs @@ -0,0 +1,97 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Async")] + public abstract class RedisPersistenceProviderTestsBaseAsync + { + protected 
abstract IModelFactory Factory { get; } + + private IRedisClientAsync client; + private IRedisTypedClientAsync redis; + + [SetUp] + public async Task SetUp() + { + if (client is object) + { + await client.DisposeAsync(); + client = null; + } + client = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + await client.FlushAllAsync(); + + redis = client.As(); + } + + [Test] + public async Task Can_Store_and_GetById_ModelWithIdAndName() + { + const int modelId = 1; + var to = Factory.CreateInstance(modelId); + await redis.StoreAsync(to); + + var from = await redis.GetByIdAsync(to.GetId().ToString()); + + Factory.AssertIsEqual(to, from); + } + + [Test] + public async Task Can_StoreAll_and_GetByIds_ModelWithIdAndName() + { + var tos = Factory.CreateList(); + var ids = tos.ConvertAll(x => x.GetId().ToString()); + + await redis.StoreAllAsync(tos); + + var froms = await redis.GetByIdsAsync(ids); + var fromIds = froms.Map(x => x.GetId().ToString()); + + Assert.That(fromIds, Is.EquivalentTo(ids)); + } + + [Test] + public async Task Can_Delete_ModelWithIdAndName() + { + var tos = Factory.CreateList(); + var ids = tos.ConvertAll(x => x.GetId().ToString()); + + await redis.StoreAllAsync(tos); + + var deleteIds = new[] { ids[1], ids[3] }; + + await redis.DeleteByIdsAsync(deleteIds); + + var froms = await redis.GetByIdsAsync(ids); + var fromIds = froms.Map(x => x.GetId().ToString()); + + var expectedIds = ids.Where(x => !deleteIds.Contains(x)) + .ToList().ConvertAll(x => x.ToString()); + + Assert.That(fromIds, Is.EquivalentTo(expectedIds)); + } + + [Test] + public async Task Can_DeleteAll() + { + var tos = Factory.CreateList(); + await redis.StoreAllAsync(tos); + + var all = await redis.GetAllAsync(); + + Assert.That(all.Count, Is.EqualTo(tos.Count)); + + await redis.DeleteAllAsync(); + + all = await redis.GetAllAsync(); + + Assert.That(all.Count, Is.EqualTo(0)); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.Async.cs new file mode 100644 index 00000000..3f7c00cf --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisPersistenceProviderTestsBaseImpl.Async.cs @@ -0,0 +1,40 @@ +using System; +using ServiceStack.Common.Tests.Models; + +namespace ServiceStack.Redis.Tests.Generic +{ + public class RedisPersistenceProviderTestsModelWithFieldsOfDifferentTypesAsync + : RedisPersistenceProviderTestsBaseAsync + { + private readonly IModelFactory factory = + new ModelWithFieldsOfDifferentTypesFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + public class RedisPersistenceProviderTestsStringFactoryAsync + : RedisPersistenceProviderTestsBaseAsync + { + private readonly IModelFactory factory = new BuiltInsFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + + public class RedisPersistenceProviderTestsShipperAsync + : RedisPersistenceProviderTestsBaseAsync + { + private readonly IModelFactory factory = new ShipperFactory(); + + protected override IModelFactory Factory + { + get { return factory; } + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientAppTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientAppTests.cs index 2111837b..89618a1e 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientAppTests.cs +++ 
b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientAppTests.cs @@ -1,6 +1,5 @@ using System.Collections.Generic; using NUnit.Framework; -using ServiceStack.Common; using ServiceStack.Redis.Generic; namespace ServiceStack.Redis.Tests.Generic @@ -229,5 +228,34 @@ public void Can_GetEarliestFromRecentsList() Assert.That(expectedAnswers.EquivalentTo(earliest3Answers)); } + [Test] + public void Can_save_quoted_strings() + { + var str = "string \"with\" \"quotes\""; + var cacheKey = "quotetest"; + + Redis.As().SetValue(cacheKey, str); + var fromRedis = Redis.As().GetValue(cacheKey); + Assert.That(fromRedis, Is.EqualTo(str)); + + Redis.Set(cacheKey, str); + fromRedis = Redis.Get(cacheKey); + Assert.That(fromRedis, Is.EqualTo(str)); + + Redis.SetValue(cacheKey, str); + fromRedis = Redis.GetValue(cacheKey); + Assert.That(fromRedis, Is.EqualTo(str)); + + Redis.SetValue(cacheKey, str.ToJson()); + fromRedis = Redis.GetValue(cacheKey).FromJson(); + Assert.That(fromRedis, Is.EqualTo(str)); + } + + [Test] + public void Does_return_non_existent_keys_as_defaultValue() + { + Assert.That(Redis.Get("notexists"), Is.Null); + Assert.That(Redis.Get("notexists"), Is.EqualTo(0)); + } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.Async.cs new file mode 100644 index 00000000..7224d6c5 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.Async.cs @@ -0,0 +1,167 @@ +using NUnit.Framework; +using ServiceStack.Redis.Generic; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture, Category("Integration")] + public class RedisTypedClientTestsAsync + : RedisClientTestsBaseAsync + { + public class CacheRecord + { + public CacheRecord() + { + this.Children = new List(); + } + + public string Id { get; set; } + public List Children { get; set; } + } + + public class CacheRecordChild + { + public string Id { get; set; } + public string Data { get; set; } + } + + protected IRedisTypedClientAsync RedisTyped; + + [SetUp] + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + RedisRaw?.Dispose(); + RedisRaw = new RedisClient(TestConfig.SingleHost) + { + NamespacePrefix = "RedisTypedClientTests:" + }; + RedisTyped = RedisAsync.As(); + } + + [TearDown] + public virtual async Task TearDown() + { + foreach (var t in await RedisAsync.SearchKeysAsync(RedisRaw.NamespacePrefix + "*")) + { + await NativeAsync.DelAsync(t); + } + } + + [Test] + public async Task Can_Store_with_Prefix() + { + var expected = new CacheRecord() { Id = "123" }; + await RedisTyped.StoreAsync(expected); + var current = await RedisAsync.GetAsync("RedisTypedClientTests:urn:cacherecord:123"); + Assert.AreEqual(expected.Id, current.Id); + } + + [Test] + public async Task Can_Expire() + { + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + await RedisTyped.StoreAsync(cachedRecord); + await RedisTyped.ExpireInAsync("key", TimeSpan.FromSeconds(1)); + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Not.Null); + await Task.Delay(2000); + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Null); + } + + [Ignore("Changes in system clock can break test")] + [Test] + public async Task Can_ExpireAt() + { + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = 
"childKey", Data = "data" } + } + }; + + await RedisTyped.StoreAsync(cachedRecord); + + var in2Secs = DateTime.UtcNow.AddSeconds(2); + + await RedisTyped.ExpireAtAsync("key", in2Secs); + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Not.Null); + await Task.Delay(3000); + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Null); + } + + [Test] + public async Task Can_Delete_All_Items() + { + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + await RedisTyped.StoreAsync(cachedRecord); + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Not.Null); + + await RedisTyped.DeleteAllAsync(); + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Null); + + } + + [Test] + public async Task Can_Delete_All_Items_multiple_batches() + { + // Clear previous usage + await RedisAsync.DeleteAsync(RedisRaw.GetTypeIdsSetKey()); + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + var exists = RedisRaw.Exists(RedisRaw.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, Is.EqualTo(0)); + + await RedisTyped.StoreAsync(cachedRecord); + + exists = RedisRaw.Exists(RedisRaw.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, Is.EqualTo(1)); + + RedisConfig.CommandKeysBatchSize = 5; + + for (int i = 0; i < 50; i++) + { + cachedRecord.Id = "key" + i; + await RedisTyped.StoreAsync(cachedRecord); + } + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Not.Null); + + await RedisTyped.DeleteAllAsync(); + + Assert.That(await RedisTyped.GetByIdAsync("key"), Is.Null); + + exists = RedisRaw.Exists(RedisRaw.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, Is.EqualTo(0)); + + RedisConfig.Reset(); + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.cs index 333ba07f..18236cf2 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedClientTests.cs @@ -28,20 +28,14 @@ public class CacheRecordChild public string Data { get; set; } } - protected RedisClient Redis; protected IRedisTypedClient RedisTyped; - protected void Log(string fmt, params object[] args) - { - Debug.WriteLine("{0}", string.Format(fmt, args).Trim()); - } - [SetUp] - public virtual void OnBeforeEachTest() + public override void OnBeforeEachTest() { base.OnBeforeEachTest(); - if (Redis != null) Redis.Dispose(); + Redis?.Dispose(); Redis = new RedisClient(TestConfig.SingleHost) { NamespacePrefix = "RedisTypedClientTests:" @@ -82,7 +76,7 @@ public void Can_Expire() Assert.That(RedisTyped.GetById("key"), Is.Null); } - [Explicit("Changes in system clock can break test")] + [Ignore("Changes in system clock can break test")] [Test] public void Can_ExpireAt() { @@ -123,7 +117,47 @@ public void Can_Delete_All_Items() RedisTyped.DeleteAll(); Assert.That(RedisTyped.GetById("key"), Is.Null); + } + + [Test] + public void Can_Delete_All_Items_multiple_batches() + { + // Clear previous usage + Redis.Delete(Redis.GetTypeIdsSetKey(typeof(CacheRecord))); + var cachedRecord = new CacheRecord + { + Id = "key", + Children = { + new CacheRecordChild { Id = "childKey", Data = "data" } + } + }; + + var exists = Redis.Exists(Redis.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, Is.EqualTo(0)); + + RedisTyped.Store(cachedRecord); + + exists = 
Redis.Exists(Redis.GetTypeIdsSetKey(typeof(CacheRecord))); + + Assert.That(exists, Is.EqualTo(1)); + + RedisConfig.CommandKeysBatchSize = 5; + + for (int i = 0; i < 50; i++) + { + cachedRecord.Id = "key" + i; + RedisTyped.Store(cachedRecord); + } + + Assert.That(RedisTyped.GetById("key"), Is.Not.Null); + + RedisTyped.DeleteAll(); + + exists = Redis.Exists(Redis.GetTypeIdsSetKey(typeof(CacheRecord))); + Assert.That(exists, Is.EqualTo(0)); + Assert.That(RedisTyped.GetById("key"), Is.Null); + RedisConfig.Reset(); } } diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.Async.cs new file mode 100644 index 00000000..663fdf97 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.Async.cs @@ -0,0 +1,230 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisTypedPipelineTestsAsync + : RedisClientTestsBaseAsync + { + public RedisTypedPipelineTestsAsync() + { + CleanMask = "gmultitest*"; + } + + private const string Key = "gmultitest"; + private const string ListKey = "gmultitest-list"; + private const string SetKey = "gmultitest-set"; + private const string SortedSetKey = "gmultitest-sortedset"; + + readonly ShipperFactory modelFactory = new ShipperFactory(); + private IRedisTypedClientAsync typedClient; + private Shipper model; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + typedClient = RedisAsync.As(); + model = modelFactory.CreateInstance(1); + } + + + [Test] + public async Task Can_call_single_operation_in_pipeline() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.SetValueAsync(Key, model)); + + await pipeline.FlushAsync(); + } + + modelFactory.AssertIsEqual(await typedClient.GetValueAsync(Key), model); + } + + [Test] + public async Task No_commit_of_atomic_pipelines_discards_all_commands() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.SetValueAsync(Key, model)); + } + + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + } + + [Test] + public async Task Exception_in_atomic_pipelines_discards_all_commands() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + try + { + await using var pipeline = typedClient.CreatePipeline(); + pipeline.QueueCommand(r => r.SetValueAsync(Key, model)); + throw new NotSupportedException(); + } + catch (NotSupportedException) + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Can_call_single_operation_3_Times_in_pipeline() + { + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3))); + + await pipeline.FlushAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + } + + [Test] + 
public async Task Can_call_single_operation_with_callback_3_Times_in_pipeline() + { + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1)), () => results.Add(1)); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2)), () => results.Add(2)); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3)), () => results.Add(3)); + + await pipeline.FlushAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public async Task Supports_different_operation_types_in_same_pipeline() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + var typedList = typedClient.Lists[ListKey]; + var typedSet = typedClient.Sets[SetKey]; + var typedSortedSet = typedClient.SortedSets[SortedSetKey]; + + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + pipeline.QueueCommand(r => r.AddItemToSetAsync(typedSet, modelFactory.CreateInstance(3))); + pipeline.QueueCommand(r => r.SetContainsItemAsync(typedSet, modelFactory.CreateInstance(3)), b => containsItem = b); + pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(4))); + pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(5))); + pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(6))); + pipeline.QueueCommand(r => r.GetListCountAsync(typedList), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSetCountAsync(typedSet), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSortedSetCountAsync(typedSortedSet), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + + await pipeline.FlushAsync(); + } + + Assert.That(containsItem, Is.True); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + + modelFactory.AssertListsAreEqual(await typedList.GetAllAsync(), new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2) + }); + + Assert.That(await typedSet.GetAllAsync(), Is.EquivalentTo(new List + { + modelFactory.CreateInstance(3) + })); + + modelFactory.AssertListsAreEqual(await typedSortedSet.GetAllAsync(), new List + { + modelFactory.CreateInstance(4), modelFactory.CreateInstance(5), modelFactory.CreateInstance(6) + }); + } + + [Test] + public async Task Can_call_multi_string_operations_in_pipeline() + { + Shipper item1 = null; + Shipper item4 = null; + + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + 
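+ // Mixing writes and reads in a single pipeline: the read callbacks below should only fire once FlushAsync() sends the queued batch, so requesting list index 4 (out of range) is expected to leave item4 null.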
await using (var pipeline = typedClient.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + pipeline.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3))); + pipeline.QueueCommand(r => r.GetAllItemsFromListAsync(typedList), x => results = x); + pipeline.QueueCommand(r => r.GetItemFromListAsync(typedList, 0), x => item1 = x); + pipeline.QueueCommand(r => r.GetItemFromListAsync(typedList, 4), x => item4 = x); + + await pipeline.FlushAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + + modelFactory.AssertListsAreEqual(results, new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2), modelFactory.CreateInstance(3) + }); + + modelFactory.AssertIsEqual(item1, modelFactory.CreateInstance(1)); + Assert.That(item4, Is.Null); + } + [Test] + public async Task Pipeline_can_be_replayed() + { + const string keySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(keySquared), Is.Null); + await using var pipeline = typedClient.CreatePipeline(); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(keySquared)); + await pipeline.FlushAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(keySquared), Is.EqualTo("1")); + await typedClient.RemoveEntryAsync(Key); + await typedClient.RemoveEntryAsync(keySquared); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(keySquared), Is.Null); + + await pipeline.ReplayAsync(); + await pipeline.DisposeAsync(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(keySquared), Is.EqualTo("1")); + + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.cs index e78e12b2..00d08630 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedPipelineTests.cs @@ -40,7 +40,7 @@ public void Can_call_single_operation_in_pipeline() using (var pipeline = typedClient.CreatePipeline()) { - pipeline.QueueCommand(r => r.SetEntry(Key, model)); + pipeline.QueueCommand(r => r.SetValue(Key, model)); pipeline.Flush(); } @@ -55,7 +55,7 @@ public void No_commit_of_atomic_pipelines_discards_all_commands() using (var pipeline = typedClient.CreatePipeline()) { - pipeline.QueueCommand(r => r.SetEntry(Key, model)); + pipeline.QueueCommand(r => r.SetValue(Key, model)); } Assert.That(typedClient.GetValue(Key), Is.Null); @@ -69,11 +69,11 @@ public void Exception_in_atomic_pipelines_discards_all_commands() { using (var pipeline = typedClient.CreatePipeline()) { - pipeline.QueueCommand(r => r.SetEntry(Key, model)); + pipeline.QueueCommand(r => r.SetValue(Key, model)); throw new NotSupportedException(); } } - catch (NotSupportedException ignore) + catch (NotSupportedException) { Assert.That(typedClient.GetValue(Key), Is.Null); } diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.Async.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.Async.cs new file mode 100644 index 00000000..9d096db7 --- /dev/null +++ 
b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.Async.cs @@ -0,0 +1,240 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; + +namespace ServiceStack.Redis.Tests.Generic +{ + [TestFixture] + public class RedisTypedTransactionTestsAsync + : RedisClientTestsBaseAsync + { + private const string Key = "multitest"; + private const string ListKey = "multitest-list"; + private const string SetKey = "multitest-set"; + private const string SortedSetKey = "multitest-sortedset"; + + readonly ShipperFactory modelFactory = new ShipperFactory(); + private IRedisTypedClientAsync typedClient; + private Shipper model; + + public RedisTypedTransactionTestsAsync() + { + CleanMask = "multitest*"; + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + typedClient = RedisAsync.As(); + model = modelFactory.CreateInstance(1); + } + + [Test] + public async Task Can_call_single_operation_in_transaction() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.SetValueAsync(Key, model)); + + await trans.CommitAsync(); + } + + modelFactory.AssertIsEqual(await typedClient.GetValueAsync(Key), model); + } + + [Test] + public async Task No_commit_of_atomic_transactions_discards_all_commands() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.SetValueAsync(Key, model)); + } + + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + } + + [Test] + public async Task Exception_in_atomic_transactions_discards_all_commands() + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + try + { + await using var trans = await typedClient.CreateTransactionAsync(); + trans.QueueCommand(r => r.SetValueAsync(Key, model)); + throw new NotSupportedException(); + } + catch (NotSupportedException) + { + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Can_call_single_operation_3_Times_in_transaction() + { + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3))); + + await trans.CommitAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + } + + [Test] + public async Task Can_call_single_operation_with_callback_3_Times_in_transaction() + { + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1)), () => results.Add(1)); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2)), () => results.Add(2)); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3)), () => results.Add(3)); + + await trans.CommitAsync(); + } + + Assert.That(await 
typedList.CountAsync(), Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public async Task Supports_different_operation_types_in_same_transaction() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + var typedList = typedClient.Lists[ListKey]; + var typedSet = typedClient.Sets[SetKey]; + var typedSortedSet = typedClient.SortedSets[SortedSetKey]; + + Assert.That(await typedClient.GetValueAsync(Key), Is.Null); + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + trans.QueueCommand(r => r.AddItemToSetAsync(typedSet, modelFactory.CreateInstance(3))); + trans.QueueCommand(r => r.SetContainsItemAsync(typedSet, modelFactory.CreateInstance(3)), b => containsItem = b); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(4))); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(5))); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(typedSortedSet, modelFactory.CreateInstance(6))); + trans.QueueCommand(r => r.GetListCountAsync(typedList), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSetCountAsync(typedSet), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSortedSetCountAsync(typedSortedSet), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + + await trans.CommitAsync(); + } + + Assert.That(containsItem, Is.True); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + + modelFactory.AssertListsAreEqual(await typedList.GetAllAsync(), new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2) + }); + + Assert.That(await typedSet.GetAllAsync(), Is.EquivalentTo(new List + { + modelFactory.CreateInstance(3) + })); + + modelFactory.AssertListsAreEqual(await typedSortedSet.GetAllAsync(), new List + { + modelFactory.CreateInstance(4), modelFactory.CreateInstance(5), modelFactory.CreateInstance(6) + }); + } + + [Test] + public async Task Can_call_multi_string_operations_in_transaction() + { + Shipper item1 = null; + Shipper item4 = null; + + var results = new List(); + + var typedList = typedClient.Lists[ListKey]; + Assert.That(await typedList.CountAsync(), Is.EqualTo(0)); + + await using (var trans = await typedClient.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(1))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(2))); + trans.QueueCommand(r => r.AddItemToListAsync(typedList, modelFactory.CreateInstance(3))); + trans.QueueCommand(r => r.GetAllItemsFromListAsync(typedList), x => results = x); + trans.QueueCommand(r => r.GetItemFromListAsync(typedList, 0), x => item1 = x); + trans.QueueCommand(r => r.GetItemFromListAsync(typedList, 4), x => item4 = x); + + await trans.CommitAsync(); + } + + Assert.That(await typedList.CountAsync(), Is.EqualTo(3)); + + 
modelFactory.AssertListsAreEqual(results, new List + { + modelFactory.CreateInstance(1), modelFactory.CreateInstance(2), modelFactory.CreateInstance(3) + }); + + modelFactory.AssertIsEqual(item1, modelFactory.CreateInstance(1)); + Assert.That(item4, Is.Null); + } + [Test] + // Operations that are not supported in older versions will look at server info to determine what to do. + // If server info is fetched each time, then it will interfere with the transaction + public async Task Can_call_operation_not_supported_on_older_servers_in_transaction() + { + var temp = new byte[1]; + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync("key", 5, temp)); + await trans.CommitAsync(); + } + + + [Test] + public async Task Transaction_can_be_replayed() + { + string KeySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + trans.QueueCommand(r => r.IncrementValueAsync(KeySquared)); + await trans.CommitAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + await NativeAsync.DelAsync(Key); + await NativeAsync.DelAsync(KeySquared); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + + await trans.ReplayAsync(); + await trans.DisposeAsync(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.cs b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.cs index 632b76a6..02df210c 100644 --- a/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.cs +++ b/tests/ServiceStack.Redis.Tests/Generic/RedisTypedTransactionTests.cs @@ -39,7 +39,7 @@ public void Can_call_single_operation_in_transaction() using (var trans = typedClient.CreateTransaction()) { - trans.QueueCommand(r => r.SetEntry(Key, model)); + trans.QueueCommand(r => r.SetValue(Key, model)); trans.Commit(); } @@ -54,7 +54,7 @@ public void No_commit_of_atomic_transactions_discards_all_commands() using (var trans = typedClient.CreateTransaction()) { - trans.QueueCommand(r => r.SetEntry(Key, model)); + trans.QueueCommand(r => r.SetValue(Key, model)); } Assert.That(typedClient.GetValue(Key), Is.Null); @@ -68,11 +68,11 @@ public void Exception_in_atomic_transactions_discards_all_commands() { using (var trans = typedClient.CreateTransaction()) { - trans.QueueCommand(r => r.SetEntry(Key, model)); + trans.QueueCommand(r => r.SetValue(Key, model)); throw new NotSupportedException(); } } - catch (NotSupportedException ignore) + catch (NotSupportedException) { Assert.That(typedClient.GetValue(Key), Is.Null); } diff --git a/tests/ServiceStack.Redis.Tests/Integration/IntegrationTestBase.cs b/tests/ServiceStack.Redis.Tests/Integration/IntegrationTestBase.cs index 6b8b8b02..9283c309 100644 --- a/tests/ServiceStack.Redis.Tests/Integration/IntegrationTestBase.cs +++ b/tests/ServiceStack.Redis.Tests/Integration/IntegrationTestBase.cs @@ -59,7 +59,7 @@ protected void RunSimultaneously( #else var clientAsyncResults = new List(); #endif - using (var manager = 
clientManagerFactory(TestConfig.MasterHosts, TestConfig.SlaveHosts)) + using (var manager = clientManagerFactory(TestConfig.MasterHosts, TestConfig.ReplicaHosts)) { for (var i = 0; i < noOfConcurrentClients; i++) { @@ -79,25 +79,25 @@ protected void RunSimultaneously( WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); #endif - Debug.WriteLine(String.Format("Time Taken: {0}", (Stopwatch.GetTimestamp() - before) / 1000)); + Debug.WriteLine($"Time Taken: {(Stopwatch.GetTimestamp() - before) / 1000}"); } protected static void CheckHostCountMap(Dictionary hostCountMap) { Debug.WriteLine(TypeSerializer.SerializeToString(hostCountMap)); - if (TestConfig.SlaveHosts.Length <= 1) return; + if (TestConfig.ReplicaHosts.Length <= 1) return; var hostCount = 0; foreach (var entry in hostCountMap) { if (entry.Value < 5) { - Debug.WriteLine("ERROR: Host has unproportianate distrobution: " + entry.Value); + Debug.WriteLine("ERROR: Host has unproportionate distribution: " + entry.Value); } if (entry.Value > 60) { - Debug.WriteLine("ERROR: Host has unproportianate distrobution: " + entry.Value); + Debug.WriteLine("ERROR: Host has unproportionate distribution: " + entry.Value); } hostCount += entry.Value; } diff --git a/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedRedisClientIntegrationTests.cs b/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedRedisClientIntegrationTests.cs index 8f3a6884..92d0f551 100644 --- a/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedRedisClientIntegrationTests.cs +++ b/tests/ServiceStack.Redis.Tests/Integration/MultiThreadedRedisClientIntegrationTests.cs @@ -17,7 +17,7 @@ public class MultiThreadedRedisClientIntegrationTests { private static string testData; - [TestFixtureSetUp] + [OneTimeSetUp] public void onBeforeTestFixture() { var results = 100.Times(x => ModelWithFieldsOfDifferentTypes.Create(x)); @@ -96,7 +96,7 @@ private static void UseClient(RedisClient client, int clientNo) Log("Client '{0}' is using '{1}'", clientNo, client.Host); var testClientKey = "test:" + host + ":" + clientNo; - client.SetEntry(testClientKey, testData); + client.SetValue(testClientKey, testData); var result = client.GetValue(testClientKey) ?? 
""; Log("\t{0} => {1} len {2} {3} len", testClientKey, diff --git a/tests/ServiceStack.Redis.Tests/Integration/RedisRegressionTestRun.cs b/tests/ServiceStack.Redis.Tests/Integration/RedisRegressionTestRun.cs index 3b4786eb..a5aa4707 100644 --- a/tests/ServiceStack.Redis.Tests/Integration/RedisRegressionTestRun.cs +++ b/tests/ServiceStack.Redis.Tests/Integration/RedisRegressionTestRun.cs @@ -14,7 +14,7 @@ public class RedisRegressionTestRun { private static string testData; - [TestFixtureSetUp] + [OneTimeSetUp] public void onBeforeTestFixture() { var results = 100.Times(x => ModelWithFieldsOfDifferentTypes.Create(x)); @@ -22,7 +22,7 @@ public void onBeforeTestFixture() testData = TypeSerializer.SerializeToString(results); } - [Explicit("Can hang CI")] + [Ignore("Can hang CI")] [Test] public void Can_support_64_threads_using_the_client_simultaneously() { @@ -31,7 +31,7 @@ public void Can_support_64_threads_using_the_client_simultaneously() const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 var clientAsyncResults = new List(); - using (var manager = new PooledRedisClientManager(TestConfig.MasterHosts, TestConfig.SlaveHosts)) + using (var manager = new PooledRedisClientManager(TestConfig.MasterHosts, TestConfig.ReplicaHosts)) { manager.GetClient().Run(x => x.FlushAll()); diff --git a/tests/ServiceStack.Redis.Tests/Issues/ConnectionStringConfigIssues.cs b/tests/ServiceStack.Redis.Tests/Issues/ConnectionStringConfigIssues.cs new file mode 100644 index 00000000..f2f68d1b --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Issues/ConnectionStringConfigIssues.cs @@ -0,0 +1,27 @@ +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests.Issues +{ + public class ConnectionStringConfigIssues + { + [Test] + public void Can_use_password_with_equals() + { + var connString = "127.0.0.1?password=" + "p@55w0rd=".UrlEncode(); + + var config = connString.ToRedisEndpoint(); + Assert.That(config.Password, Is.EqualTo("p@55w0rd=")); + } + + [Test, Ignore("Requires redis-server configured with 'requirepass p@55w0rd='")] + public void Can_connect_to_redis_with_password_with_equals() + { + var connString = "127.0.0.1?password=" + "p@55w0rd=".UrlEncode(); + var redisManager = new PooledRedisClientManager(connString); + using (var redis = redisManager.GetClient()) + { + Assert.That(redis.Password, Is.EqualTo("p@55w0rd=")); + } + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/PipelineIssueTests.cs b/tests/ServiceStack.Redis.Tests/Issues/PipelineIssueTests.cs new file mode 100644 index 00000000..f675d178 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/Issues/PipelineIssueTests.cs @@ -0,0 +1,81 @@ +using System; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests.Issues +{ + [TestFixture] + public class PipelineIssueTests + : RedisClientTestsBase + { + [Test] + public void Disposing_Client_Clears_Pipeline() + { + var clientMgr = new PooledRedisClientManager(TestConfig.SingleHost); + + using (var client = clientMgr.GetClient()) + { + client.Set("k1", "v1"); + client.Set("k2", "v2"); + client.Set("k3", "v3"); + + using (var pipe = client.CreatePipeline()) + { + pipe.QueueCommand(c => c.Get("k1"), p => { throw new Exception(); }); + pipe.QueueCommand(c => c.Get("k2")); + + try + { + pipe.Flush(); + } + catch (Exception) + { + //The exception is expected. Swallow it. 
+ } + } + } + + using (var client = clientMgr.GetClient()) + { + Assert.AreEqual("v3", client.Get("k3")); + } + } + + [Test] + public void Can_Set_with_DateTime_in_Pipeline() + { + using (var clientsManager = new RedisManagerPool(TestConfig.SingleHost)) + { + bool result; + int value = 111; + string key = $"key:{value}"; + + // Set key with pipeline (batching many requests) + using (var redis = clientsManager.GetClient()) + { + using (var pipeline = redis.CreatePipeline()) + { + //Only atomic operations can be called within a Transaction or Pipeline + Assert.Throws(() => + pipeline.QueueCommand(r => r.Set(key, value, DateTime.Now.AddMinutes(1)), r => result = r)); + } + + using (var pipeline = redis.CreatePipeline()) + { + pipeline.QueueCommand(r => r.Set(key, value), r => result = r); + pipeline.QueueCommand(r => r.ExpireEntryAt(key, DateTime.Now.AddMinutes(1))); + + pipeline.Flush(); + } + } + + // Get key + using (var redis = clientsManager.GetClient()) + { + var res = redis.Get(key); + Assert.That(res, Is.EqualTo(value)); + } + } + + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/ReportedIssues.cs b/tests/ServiceStack.Redis.Tests/Issues/ReportedIssues.cs index fc4373eb..2b44d1d8 100644 --- a/tests/ServiceStack.Redis.Tests/Issues/ReportedIssues.cs +++ b/tests/ServiceStack.Redis.Tests/Issues/ReportedIssues.cs @@ -1,5 +1,8 @@ using System.Collections.Generic; +using System.Linq; using NUnit.Framework; +using ServiceStack.Redis.Generic; +using ServiceStack.Text; namespace ServiceStack.Redis.Tests.Issues { @@ -49,5 +52,60 @@ public void Success_callback_fails_for_pipeline_using_GetItemScoreInSortedSet() Assert.That(score, Is.EqualTo(1)); } + + public class Test + { + public int Id { get; set; } + public string Name { get; set; } + + protected bool Equals(Test other) => Id == other.Id && Name == other.Name; + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != this.GetType()) return false; + return Equals((Test) obj); + } + + public override int GetHashCode() + { + unchecked + { + return (Id * 397) ^ (Name != null ? 
Name.GetHashCode() : 0); + } + } + } + + [Test] + public void Try_simulate_NRE_when_calling_GetAllEntriesFromHash_using_BasicRedisClientManager() + { + using (var redisManager = new BasicRedisClientManager(TestConfig.SingleHost)) + using (var redis = redisManager.GetClient()) + { + IRedisHash testHash = redis.As() + .GetHash("test-hash"); + + Assert.That(testHash.Count, Is.EqualTo(0)); + + var contents = testHash.GetAll(); + Assert.That(contents.Count, Is.EqualTo(0)); + + var test1 = new Test { Id = 1, Name = "Name1" }; + var test2 = new Test { Id = 2, Name = "Name2" }; + testHash["A"] = test1; + testHash["B"] = test2; + + contents = testHash.GetAll(); + + Assert.That(contents, Is.EqualTo(new Dictionary { + ["A"] = test1, + ["B"] = test2, + })); + + Assert.That(testHash["A"], Is.EqualTo(test1)); + Assert.That(testHash["B"], Is.EqualTo(test2)); + } + } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Issues/TransactionIssueTests.cs b/tests/ServiceStack.Redis.Tests/Issues/TransactionIssueTests.cs index cf02e08a..fda38588 100644 --- a/tests/ServiceStack.Redis.Tests/Issues/TransactionIssueTests.cs +++ b/tests/ServiceStack.Redis.Tests/Issues/TransactionIssueTests.cs @@ -112,7 +112,7 @@ private void CheckConnection(object state) Task.Factory.StartNew(CheckThisConnection); } - [Explicit, Test] + [Ignore("Integration"), Test] public void Can_queue_large_transaction() { var q = new System.Threading.Timer(CheckConnection, null, 30000, 2); diff --git a/tests/ServiceStack.Redis.Tests/LexTests.Async.cs b/tests/ServiceStack.Redis.Tests/LexTests.Async.cs new file mode 100644 index 00000000..d00e3f0f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/LexTests.Async.cs @@ -0,0 +1,115 @@ +using NUnit.Framework; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class LexTestsAsync + : RedisClientTestsBaseAsync + { + readonly string[] values = "a,b,c,d,e,f,g".Split(','); + + [SetUp] + public async Task SetUp() + { + await RedisAsync.FlushAllAsync(); + foreach(var x in values) + { + await NativeAsync.ZAddAsync("zset", 0, x.ToUtf8Bytes()); + } + } + + [Test] + public async Task Can_ZRangeByLex_all_entries() + { + var results = await NativeAsync.ZRangeByLexAsync("zset", "-", "+"); + + Assert.That(results.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(values)); + + results = await NativeAsync.ZRangeByLexAsync("zset", "-", "+", 1, 3); + Assert.That(results.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(new[] { "b", "c", "d" })); + } + + [Test] + public async Task Can_ZRangeByLex_Desc() + { + var descInclusive = await NativeAsync.ZRangeByLexAsync("zset", "-", "[c"); + Assert.That(descInclusive.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(new[] { "a", "b", "c" })); + + var descExclusive = await NativeAsync.ZRangeByLexAsync("zset", "-", "(c"); + Assert.That(descExclusive.Map(x => x.FromUtf8Bytes()), Is.EquivalentTo(new[] { "a", "b" })); + } + + [Test] + public async Task Can_ZRangeByLex_Min_and_Max() + { + var range = await NativeAsync.ZRangeByLexAsync("zset", "[aaa", "(g"); + Assert.That(range.Map(x => x.FromUtf8Bytes()), + Is.EquivalentTo(new[] { "b", "c", "d", "e", "f" })); + } + + [Test] + public async Task Can_ZlexCount() + { + var total = await NativeAsync.ZLexCountAsync("zset", "-", "+"); + Assert.That(total, Is.EqualTo(values.Length)); + + Assert.That(await NativeAsync.ZLexCountAsync("zset", "-", "[c"), Is.EqualTo(3)); + Assert.That(await NativeAsync.ZLexCountAsync("zset", "-", "(c"), Is.EqualTo(2)); + } + + [Test] + public async Task 
Can_ZRemRangeByLex() + { + var removed = await NativeAsync.ZRemRangeByLexAsync("zset", "[aaa", "(g"); + Assert.That(removed, Is.EqualTo(5)); + + var remainder = await NativeAsync.ZRangeByLexAsync("zset", "-", "+"); + Assert.That(remainder.Map(x => x.FromUtf8Bytes()), Is.EqualTo(new[] { "a", "g" })); + } + + [Test] + public async Task Can_SearchSortedSet() + { + Assert.That(await RedisAsync.SearchSortedSetAsync("zset"), Is.EquivalentTo(values)); + Assert.That(await RedisAsync.SearchSortedSetAsync("zset", start: "-"), Is.EquivalentTo(values)); + Assert.That(await RedisAsync.SearchSortedSetAsync("zset", end: "+"), Is.EquivalentTo(values)); + + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", start: "[aaa")).Count, Is.EqualTo(values.Length - 1)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", end: "(g")).Count, Is.EqualTo(values.Length - 1)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "[aaa", "(g")).Count, Is.EqualTo(values.Length - 2)); + + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "a", "c")).Count, Is.EqualTo(3)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "[a", "[c")).Count, Is.EqualTo(3)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "a", "(c")).Count, Is.EqualTo(2)); + Assert.That((await RedisAsync.SearchSortedSetAsync("zset", "(a", "(c")).Count, Is.EqualTo(1)); + } + + [Test] + public async Task Can_SearchSortedSetCount() + { + Assert.That(await RedisAsync.SearchSortedSetAsync("zset"), Is.EquivalentTo(values)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", start: "-"), Is.EqualTo(values.Length)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", end: "+"), Is.EqualTo(values.Length)); + + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", start: "[aaa"), Is.EqualTo(values.Length - 1)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", end: "(g"), Is.EqualTo(values.Length - 1)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "[aaa", "(g"), Is.EqualTo(values.Length - 2)); + + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "a", "c"), Is.EqualTo(3)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "[a", "[c"), Is.EqualTo(3)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "a", "(c"), Is.EqualTo(2)); + Assert.That(await RedisAsync.SearchSortedSetCountAsync("zset", "(a", "(c"), Is.EqualTo(1)); + } + + [Test] + public async Task Can_RemoveRangeFromSortedSetBySearch() + { + var removed = await RedisAsync.RemoveRangeFromSortedSetBySearchAsync("zset", "[aaa", "(g"); + Assert.That(removed, Is.EqualTo(5)); + + var remainder = await RedisAsync.SearchSortedSetAsync("zset"); + Assert.That(remainder, Is.EqualTo(new[] { "a", "g" })); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/LicenseUsageTests.cs b/tests/ServiceStack.Redis.Tests/LicenseUsageTests.cs index 8155482a..7a1f4ad2 100644 --- a/tests/ServiceStack.Redis.Tests/LicenseUsageTests.cs +++ b/tests/ServiceStack.Redis.Tests/LicenseUsageTests.cs @@ -24,11 +24,7 @@ public void SetUp() [TearDown] public void TearDown() { -#if NETCORE - Licensing.RegisterLicense(Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE")); -#else - Licensing.RegisterLicense(new AppSettings().GetString("servicestack:license")); -#endif + Licensing.RegisterLicense(Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE")); } [Test] @@ -51,7 +47,7 @@ public void Throws_on_access_of_21_types() } } - [Test, Explicit("Takes 
too long - but works!")] + [Test, Ignore("Takes too long - but works!")] public void Allows_access_of_6000_operations() { using (var client = new RedisClient(TestConfig.SingleHost)) @@ -60,7 +56,7 @@ public void Allows_access_of_6000_operations() } } - [Test, Explicit("Takes too long - but works!")] + [Test, Ignore("Takes too long - but works!")] public void Throws_on_access_of_6100_operations() { using (var client = new RedisClient(TestConfig.SingleHost)) @@ -78,7 +74,7 @@ public class RegisteredLicenseUsageTests : LicenseUsageTests public void Allows_access_of_21_types() { #if NETCORE - Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE"); + Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE"); #else Licensing.RegisterLicense(new AppSettings().GetString("servicestack:license")); #endif @@ -92,11 +88,11 @@ public void Allows_access_of_21_types() } } - [Test, Explicit("Takes too long - but works!")] + [Test, Ignore("Takes too long - but works!")] public void Allows_access_of_6100_operations() { #if NETCORE - Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE"); + Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE"); #else Licensing.RegisterLicense(new AppSettings().GetString("servicestack:license")); #endif diff --git a/tests/ServiceStack.Redis.Tests/LuaCachedScripts.Async.cs b/tests/ServiceStack.Redis.Tests/LuaCachedScripts.Async.cs new file mode 100644 index 00000000..4cec7af8 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/LuaCachedScripts.Async.cs @@ -0,0 +1,297 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + [Category("Async")] + public class LuaCachedScriptsAsync + { + private const string LuaScript = @" +local limit = tonumber(ARGV[2]) +local pattern = ARGV[1] +local cursor = 0 +local len = 0 +local results = {} + +repeat + local r = redis.call('scan', cursor, 'MATCH', pattern, 'COUNT', limit) + cursor = tonumber(r[1]) + for k,v in ipairs(r[2]) do + table.insert(results, v) + len = len + 1 + if len == limit then break end + end +until cursor == 0 or len == limit + +return results +"; + + private static async Task AddTestKeysAsync(IRedisClientAsync redis, int count) + { + for (int i = 0; i < count; i++) + await redis.SetValueAsync("key:" + i, "value:" + i); + } + + [Test] + public async Task Can_call_repeated_scans_in_LUA() + { + await using var redis = new RedisClient().ForAsyncOnly(); + await AddTestKeysAsync(redis, 20); + + var r = await redis.ExecLuaAsync(LuaScript, "key:*", "10"); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + r = await redis.ExecLuaAsync(LuaScript, "key:*", "40"); + Assert.That(r.Children.Count, Is.EqualTo(20)); + } + + [Test] + public async Task Can_call_Cached_Lua() + { + await using var redis = new RedisClient().ForAsyncOnly(); + await AddTestKeysAsync(redis, 20); + + var r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, "key:*", "10")); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, "key:*", "10")); + Assert.That(r.Children.Count, Is.EqualTo(10)); + } + + [Test] + public async Task Can_call_Cached_Lua_even_after_script_is_flushed() + { + await using var redis = new RedisClient().ForAsyncOnly(); + await AddTestKeysAsync(redis, 20); + + var r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, "key:*", "10")); + 
Assert.That(r.Children.Count, Is.EqualTo(10)); + + await ((IRedisNativeClientAsync)redis).ScriptFlushAsync(); + + r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, "key:*", "10")); + Assert.That(r.Children.Count, Is.EqualTo(10)); + } + + [Test] + public async Task Can_call_repeated_scans_in_LUA_longhand() + { + await using var redis = new RedisClient().ForAsyncOnly(); + + await AddTestKeysAsync(redis, 20); + + var r = await redis.ExecLuaAsync(LuaScript, null, new[] { "key:*", "10" }); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + r = await redis.ExecLuaAsync(LuaScript, null, new[] { "key:*", "40" }); + Assert.That(r.Children.Count, Is.EqualTo(20)); + } + + [Test] + public async Task Can_call_Cached_Lua_longhand() + { + await using var redis = new RedisClient().ForAsyncOnly(); + await AddTestKeysAsync(redis, 20); + + var r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, null, new[] { "key:*", "10" })); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, null, new[] { "key:*", "10" })); + Assert.That(r.Children.Count, Is.EqualTo(10)); + } + + [Test] + public async Task Can_call_Cached_Lua_even_after_script_is_flushed_longhand() + { + await using var redis = new RedisClient().ForAsyncOnly(); + await AddTestKeysAsync(redis, 20); + + var r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, null, new[] { "key:*", "10" })); + Assert.That(r.Children.Count, Is.EqualTo(10)); + + await ((IRedisNativeClientAsync)redis).ScriptFlushAsync(); + + r = await redis.ExecCachedLuaAsync(LuaScript, sha1 => + redis.ExecLuaShaAsync(sha1, null, new[] { "key:*", "10" })); + Assert.That(r.Children.Count, Is.EqualTo(10)); + } + + private const string KeyAttributesScript = @" +local limit = tonumber(ARGV[2]) +local pattern = ARGV[1] +local cursor = 0 +local len = 0 +local keys = {} + +repeat + local r = redis.call('scan', cursor, 'MATCH', pattern, 'COUNT', limit) + cursor = tonumber(r[1]) + for k,v in ipairs(r[2]) do + table.insert(keys, v) + len = len + 1 + if len == limit then break end + end +until cursor == 0 or len == limit + +local keyAttrs = {} +for i,key in ipairs(keys) do + local type = redis.call('type', key)['ok'] + local pttl = redis.call('pttl', key) + local size = 0 + if type == 'string' then + size = redis.call('strlen', key) + elseif type == 'list' then + size = redis.call('llen', key) + elseif type == 'set' then + size = redis.call('scard', key) + elseif type == 'zset' then + size = redis.call('zcard', key) + elseif type == 'hash' then + size = redis.call('hlen', key) + end + + local attrs = {['id'] = key, ['type'] = type, ['ttl'] = pttl, ['size'] = size} + + table.insert(keyAttrs, attrs) +end + +return cjson.encode(keyAttrs)"; + + [Test] + public async Task Can_call_script_with_complex_response() + { + await using var redis = new RedisClient().ForAsyncOnly(); + var r = await redis.ExecCachedLuaAsync(KeyAttributesScript, sha1 => + redis.ExecLuaShaAsStringAsync(sha1, "key:*", "10")); + + r.Print(); + + var results = r.FromJson>(); + + Assert.That(results.Count, Is.EqualTo(10)); + + var result = results[0]; + Assert.That(result.Id.StartsWith("key:")); + Assert.That(result.Type, Is.EqualTo("string")); + Assert.That(result.Size, Is.GreaterThan("value:".Length)); + Assert.That(result.Ttl, Is.EqualTo(-1)); + } + + [Test] + public async Task Can_call_script_with_complex_response_longhand() + { + await using var redis = 
new RedisClient().ForAsyncOnly(); + var r = await redis.ExecCachedLuaAsync(KeyAttributesScript, sha1 => + redis.ExecLuaShaAsStringAsync(sha1, null, new[] { "key:*", "10" })); + + r.Print(); + + var results = r.FromJson>(); + + Assert.That(results.Count, Is.EqualTo(10)); + + var result = results[0]; + Assert.That(result.Id.StartsWith("key:")); + Assert.That(result.Type, Is.EqualTo("string")); + Assert.That(result.Size, Is.GreaterThan("value:".Length)); + Assert.That(result.Ttl, Is.EqualTo(-1)); + } + + public class SearchResult + { + public string Id { get; set; } + public string Type { get; set; } + public long Ttl { get; set; } + public long Size { get; set; } + } + + [Test] + public async Task Can_merge_multiple_SearchResults() + { + await using var Redis = new RedisClient().ForAsyncOnly(); + var limit = 10; + var query = "key:*"; + + List keys = new List(limit); + await foreach (var key in Redis.ScanAllKeysAsync(pattern: query, pageSize: limit)) + { + keys.Add(key); + if (keys.Count == limit) break; + } + + var keyTypes = new Dictionary(); + var keyTtls = new Dictionary(); + var keySizes = new Dictionary(); + + if (keys.Count > 0) + { + await using (var pipeline = Redis.CreatePipeline()) + { + foreach (var key in keys) + pipeline.QueueCommand(r => r.TypeAsync(key), x => keyTypes[key] = x); + + foreach (var key in keys) + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).PTtlAsync(key), x => keyTtls[key] = x); + + await pipeline.FlushAsync(); + } + + await using (var pipeline = Redis.CreatePipeline()) + { + foreach (var entry in keyTypes) + { + var key = entry.Key; + switch (entry.Value) + { + case "string": + pipeline.QueueCommand(r => r.GetStringCountAsync(key), x => keySizes[key] = x); + break; + case "list": + pipeline.QueueCommand(r => r.GetListCountAsync(key), x => keySizes[key] = x); + break; + case "set": + pipeline.QueueCommand(r => r.GetSetCountAsync(key), x => keySizes[key] = x); + break; + case "zset": + pipeline.QueueCommand(r => r.GetSortedSetCountAsync(key), x => keySizes[key] = x); + break; + case "hash": + pipeline.QueueCommand(r => r.GetHashCountAsync(key), x => keySizes[key] = x); + break; + } + } + + await pipeline.FlushAsync(); + } + } + + var results = keys.Map(x => new SearchResult + { + Id = x, + Type = keyTypes.GetValueOrDefault(x), + Ttl = keyTtls.GetValueOrDefault(x), + Size = keySizes.GetValueOrDefault(x), + }); + + Assert.That(results.Count, Is.EqualTo(limit)); + + var result = results[0]; + Assert.That(result.Id.StartsWith("key:")); + Assert.That(result.Type, Is.EqualTo("string")); + Assert.That(result.Size, Is.GreaterThan("value:".Length)); + Assert.That(result.Ttl, Is.EqualTo(-1)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/LuaCachedScripts.cs b/tests/ServiceStack.Redis.Tests/LuaCachedScripts.cs index fd764dfe..80491fec 100644 --- a/tests/ServiceStack.Redis.Tests/LuaCachedScripts.cs +++ b/tests/ServiceStack.Redis.Tests/LuaCachedScripts.cs @@ -71,6 +71,8 @@ public void Can_call_Cached_Lua_even_after_script_is_flushed() { using (var redis = new RedisClient()) { + AddTestKeys(redis, 20); + var r = redis.ExecCachedLua(LuaScript, sha1 => redis.ExecLuaSha(sha1, "key:*", "10")); Assert.That(r.Children.Count, Is.EqualTo(10)); diff --git a/tests/ServiceStack.Redis.Tests/NetCoreTestsRunner.cs b/tests/ServiceStack.Redis.Tests/NetCoreTestsRunner.cs new file mode 100644 index 00000000..24012602 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/NetCoreTestsRunner.cs @@ -0,0 +1,39 @@ +//NUnitLite isn't recognized in VS2017 - 
shouldn't need NUnitLite with NUnit 3.5+ https://github.com/nunit/dotnet-test-nunit +#if NUNITLITE +using NUnitLite; +using NUnit.Common; +using System.Reflection; +using ServiceStack; +using ServiceStack.Text; +using System; +using System.Globalization; +using System.Threading; + +namespace NUnitLite.Tests +{ + public class NetCoreTestsRunner + { + /// + /// The main program executes the tests. Output may be routed to + /// various locations, depending on the arguments passed. + /// + /// Run with --help for a full list of arguments supported + /// + public static int Main(string[] args) + { + var licenseKey = Environment.GetEnvironmentVariable("SERVICESTACK_LICENSE"); + if (licenseKey.IsNullOrEmpty()) + throw new ArgumentNullException("SERVICESTACK_LICENSE", "Add Environment variable for SERVICESTACK_LICENSE"); + + Licensing.RegisterLicense(licenseKey); + //"ActivatedLicenseFeatures: ".Print(LicenseUtils.ActivatedLicenseFeatures()); + + CultureInfo.DefaultThreadCurrentCulture = new CultureInfo("en-US"); + JsConfig.InitStatics(); + //JsonServiceClient client = new JsonServiceClient(); + var writer = new ExtendedTextWrapper(Console.Out); + return new AutoRun(((IReflectableType)typeof(NetCoreTestsRunner)).GetTypeInfo().Assembly).Execute(args, writer, Console.In); + } + } +} +#endif \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.Async.cs b/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.Async.cs new file mode 100644 index 00000000..fdf442f6 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.Async.cs @@ -0,0 +1,440 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration"), Category("Async")] + public class PooledRedisClientManagerTestsAsync + { + [OneTimeSetUp] + public void OneTimeSetUp() + { + RedisConfig.VerifyMasterConnections = false; + } + + [OneTimeTearDown] + public void OneTimeTearDown() + { + RedisConfig.VerifyMasterConnections = true; + } + + readonly string[] testReadWriteHosts = new[] { + "readwrite1", "readwrite2:6000", "192.168.0.1", "localhost" + }; + + readonly string[] testReadOnlyHosts = new[] { + "read1", "read2:7000", "127.0.0.1" + }; + + private string firstReadWriteHost; + private string firstReadOnlyHost; + + [SetUp] + public void OnBeforeEachTest() + { + firstReadWriteHost = testReadWriteHosts[0]; + firstReadOnlyHost = testReadOnlyHosts[0]; + } + + public IRedisClientsManagerAsync CreateManager(string[] readWriteHosts, string[] readOnlyHosts, int? 
defaultDb = null) + { + return new PooledRedisClientManager(readWriteHosts, readOnlyHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = readWriteHosts.Length, + MaxReadPoolSize = readOnlyHosts.Length, + AutoStart = false, + DefaultDb = defaultDb + }); + } + public IRedisClientsManagerAsync CreateManager(params string[] readWriteHosts) + { + return CreateManager(readWriteHosts, readWriteHosts); + } + + public IRedisClientsManagerAsync CreateManager() + { + return CreateManager(testReadWriteHosts, testReadOnlyHosts); + } + + public IRedisClientsManagerAsync CreateAndStartManager() + { + var manager = CreateManager(); + ((PooledRedisClientManager)manager).Start(); + return manager; + } + + [Test] + public async Task Cant_get_client_without_calling_Start() + { + await using var manager = CreateManager(); + try + { + var client = await manager.GetClientAsync(); + } + catch (InvalidOperationException) + { + return; + } + Assert.Fail("Should throw"); + } + + [Test] + public async Task Can_change_db_for_client_PooledRedisClientManager() + { + await using IRedisClientsManagerAsync db1 = new PooledRedisClientManager(1, new string[] { TestConfig.SingleHost }); + await using IRedisClientsManagerAsync db2 = new PooledRedisClientManager(2, new string[] { TestConfig.SingleHost }); + var val = Environment.TickCount; + var key = "test" + val; + var db1c = await db1.GetClientAsync(); + var db2c = await db2.GetClientAsync(); + try + { + await db1c.SetAsync(key, val); + Assert.That(await db2c.GetAsync(key), Is.EqualTo(0)); + Assert.That(await db1c.GetAsync(key), Is.EqualTo(val)); + } + finally + { + await db1c.RemoveAsync(key); + } + } + + [Test] + public async Task Can_change_db_for_client_RedisManagerPool() + { + await using IRedisClientsManagerAsync db1 = new RedisManagerPool(TestConfig.SingleHost + "?db=1"); + await using IRedisClientsManagerAsync db2 = new RedisManagerPool(TestConfig.SingleHost + "?db=2"); + var val = Environment.TickCount; + var key = "test" + val; + var db1c = await db1.GetClientAsync(); + var db2c = await db2.GetClientAsync(); + try + { + await db1c.SetAsync(key, val); + Assert.That(await db2c.GetAsync(key), Is.EqualTo(0)); + Assert.That(await db1c.GetAsync(key), Is.EqualTo(val)); + } + finally + { + await db1c.RemoveAsync(key); + } + } + + [Test] + public async Task Can_change_db_for_client_BasicRedisClientManager() + { + await using IRedisClientsManagerAsync db1 = new BasicRedisClientManager(1, new string[] { TestConfig.SingleHost }); + await using IRedisClientsManagerAsync db2 = new BasicRedisClientManager(2, new string[] { TestConfig.SingleHost }); + var val = Environment.TickCount; + var key = "test" + val; + var db1c = await db1.GetClientAsync(); + var db2c = await db2.GetClientAsync(); + try + { + await db1c.SetAsync(key, val); + Assert.That(await db2c.GetAsync(key), Is.EqualTo(0)); + Assert.That(await db1c.GetAsync(key), Is.EqualTo(val)); + } + finally + { + await db1c.RemoveAsync(key); + } + } + + [Test] + public async Task Can_get_client_after_calling_Start() + { + await using var manager = CreateAndStartManager(); + var client = await manager.GetClientAsync(); + } + + [Test] + public async Task Can_get_ReadWrite_client() + { + await using var manager = CreateAndStartManager(); + var client = await manager.GetClientAsync(); + + AssertClientHasHost(client, firstReadWriteHost); + } + + private static void AssertClientHasHost(IRedisClientAsync client, string hostWithOptionalPort) + { + var parts = hostWithOptionalPort.Split(':'); + var port = parts.Length > 1 ? 
int.Parse(parts[1]) : RedisConfig.DefaultPort; + + Assert.That(client.Host, Is.EqualTo(parts[0])); + Assert.That(client.Port, Is.EqualTo(port)); + } + + [Test] + public async Task Can_get_ReadOnly_client() + { + await using var manager = CreateAndStartManager(); + var client = await manager.GetReadOnlyClientAsync(); + + AssertClientHasHost(client, firstReadOnlyHost); + } + + [Test] + public async Task Does_loop_through_ReadWrite_hosts() + { + await using var manager = CreateAndStartManager(); + var client1 = await manager.GetClientAsync(); + await client1.DisposeAsync(); + var client2 = await manager.GetClientAsync(); + var client3 = await manager.GetClientAsync(); + var client4 = await manager.GetClientAsync(); + var client5 = await manager.GetClientAsync(); + + AssertClientHasHost(client1, testReadWriteHosts[0]); + AssertClientHasHost(client2, testReadWriteHosts[1]); + AssertClientHasHost(client3, testReadWriteHosts[2]); + AssertClientHasHost(client4, testReadWriteHosts[3]); + AssertClientHasHost(client5, testReadWriteHosts[0]); + } + + [Test] + public async Task Does_loop_through_ReadOnly_hosts() + { + await using var manager = CreateAndStartManager(); + var client1 = await manager.GetReadOnlyClientAsync(); + await client1.DisposeAsync(); + var client2 = await manager.GetReadOnlyClientAsync(); + await client2.DisposeAsync(); + var client3 = await manager.GetReadOnlyClientAsync(); + var client4 = await manager.GetReadOnlyClientAsync(); + var client5 = await manager.GetReadOnlyClientAsync(); + + AssertClientHasHost(client1, testReadOnlyHosts[0]); + AssertClientHasHost(client2, testReadOnlyHosts[1]); + AssertClientHasHost(client3, testReadOnlyHosts[2]); + AssertClientHasHost(client4, testReadOnlyHosts[0]); + AssertClientHasHost(client5, testReadOnlyHosts[1]); + } + + [Test] + public async Task Can_have_different_pool_size_and_host_configurations() + { + var writeHosts = new[] { "readwrite1" }; + var readHosts = new[] { "read1", "read2" }; + + const int poolSizeMultiplier = 4; + + await using IRedisClientsManagerAsync manager = new PooledRedisClientManager(writeHosts, readHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = writeHosts.Length * poolSizeMultiplier, + MaxReadPoolSize = readHosts.Length * poolSizeMultiplier, + AutoStart = true, + } + ); + //A poolsize of 4 will not block getting 4 clients + await using (var client1 = await manager.GetClientAsync()) + await using (var client2 = await manager.GetClientAsync()) + await using (var client3 = await manager.GetClientAsync()) + await using (var client4 = await manager.GetClientAsync()) + { + AssertClientHasHost(client1, writeHosts[0]); + AssertClientHasHost(client2, writeHosts[0]); + AssertClientHasHost(client3, writeHosts[0]); + AssertClientHasHost(client4, writeHosts[0]); + } + + //A poolsize of 8 will not block getting 8 clients + await using (var client1 = await manager.GetReadOnlyClientAsync()) + await using (var client2 = await manager.GetReadOnlyClientAsync()) + await using (var client3 = await manager.GetReadOnlyClientAsync()) + await using (var client4 = await manager.GetReadOnlyClientAsync()) + await using (var client5 = await manager.GetReadOnlyClientAsync()) + await using (var client6 = await manager.GetReadOnlyClientAsync()) + await using (var client7 = await manager.GetReadOnlyClientAsync()) + await using (var client8 = await manager.GetReadOnlyClientAsync()) + { + AssertClientHasHost(client1, readHosts[0]); + AssertClientHasHost(client2, readHosts[1]); + AssertClientHasHost(client3, readHosts[0]); + 
AssertClientHasHost(client4, readHosts[1]); + AssertClientHasHost(client5, readHosts[0]); + AssertClientHasHost(client6, readHosts[1]); + AssertClientHasHost(client7, readHosts[0]); + AssertClientHasHost(client8, readHosts[1]); + } + } + + [Test] + public async Task Does_block_ReadWrite_clients_pool() + { + await using IRedisClientsManagerAsync manager = CreateAndStartManager(); + var delay = TimeSpan.FromSeconds(1); + var client1 = await manager.GetClientAsync(); + var client2 = await manager.GetClientAsync(); + var client3 = await manager.GetClientAsync(); + var client4 = await manager.GetClientAsync(); + +#pragma warning disable IDE0039 // Use local function + Action func = async delegate +#pragma warning restore IDE0039 // Use local function + { + await Task.Delay(delay + TimeSpan.FromSeconds(0.5)); + await client4.DisposeAsync(); + }; + +#if NETCORE + _ = Task.Run(func); +#else + func.BeginInvoke(null, null); +#endif + + var start = DateTime.Now; + + var client5 = await manager.GetClientAsync(); + + Assert.That(DateTime.Now - start, Is.GreaterThanOrEqualTo(delay)); + + AssertClientHasHost(client1, testReadWriteHosts[0]); + AssertClientHasHost(client2, testReadWriteHosts[1]); + AssertClientHasHost(client3, testReadWriteHosts[2]); + AssertClientHasHost(client4, testReadWriteHosts[3]); + AssertClientHasHost(client5, testReadWriteHosts[3]); + } + + [Test] + public async Task Does_block_ReadOnly_clients_pool() + { + var delay = TimeSpan.FromSeconds(1); + + await using var manager = CreateAndStartManager(); + var client1 = await manager.GetReadOnlyClientAsync(); + var client2 = await manager.GetReadOnlyClientAsync(); + var client3 = await manager.GetReadOnlyClientAsync(); + +#pragma warning disable IDE0039 // Use local function + Action func = async delegate +#pragma warning restore IDE0039 // Use local function + { + await Task.Delay(delay + TimeSpan.FromSeconds(0.5)); + await client3.DisposeAsync(); + }; +#if NETCORE + _ =Task.Run(func); +#else + func.BeginInvoke(null, null); +#endif + var start = DateTime.Now; + + var client4 = await manager.GetReadOnlyClientAsync(); + + Assert.That(DateTime.Now - start, Is.GreaterThanOrEqualTo(delay)); + + AssertClientHasHost(client1, testReadOnlyHosts[0]); + AssertClientHasHost(client2, testReadOnlyHosts[1]); + AssertClientHasHost(client3, testReadOnlyHosts[2]); + AssertClientHasHost(client4, testReadOnlyHosts[2]); + } + + [Test] + public async Task Does_throw_TimeoutException_when_PoolTimeout_exceeded() + { + await using IRedisClientsManagerAsync manager = new PooledRedisClientManager(testReadWriteHosts, testReadOnlyHosts, + new RedisClientManagerConfig + { + MaxWritePoolSize = 4, + MaxReadPoolSize = 4, + AutoStart = false, + }); + ((PooledRedisClientManager)manager).PoolTimeout = 100; + + ((PooledRedisClientManager)manager).Start(); + + var masters = 4.Times(i => manager.GetClientAsync()); + + try + { + await manager.GetClientAsync(); + Assert.Fail("Should throw TimeoutException"); + } + catch (TimeoutException ex) + { + Assert.That(ex.Message, Does.StartWith("Redis Timeout expired.")); + } + + for (int i = 0; i < 4; i++) + { + await manager.GetReadOnlyClientAsync(); + } + + try + { + await manager.GetReadOnlyClientAsync(); + Assert.Fail("Should throw TimeoutException"); + } + catch (TimeoutException ex) + { + Assert.That(ex.Message, Does.StartWith("Redis Timeout expired.")); + } + } + + //[Ignore("tempromental integration test")] + //[Test] + //public void Can_support_64_threads_using_the_client_simultaneously() + //{ + // const int 
noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 + // var clientUsageMap = new Dictionary(); + + // var clientAsyncResults = new List(); + // using (var manager = CreateAndStartManager()) + // { + // for (var i = 0; i < noOfConcurrentClients; i++) + // { + // var clientNo = i; + // var action = (Action)(() => UseClient(manager, clientNo, clientUsageMap)); + // clientAsyncResults.Add(action.BeginInvoke(null, null)); + // } + // } + + // WaitHandle.WaitAll(clientAsyncResults.ConvertAll(x => x.AsyncWaitHandle).ToArray()); + + // RedisStats.ToDictionary().PrintDump(); + + // Debug.WriteLine(TypeSerializer.SerializeToString(clientUsageMap)); + + // var hostCount = 0; + // foreach (var entry in clientUsageMap) + // { + // Assert.That(entry.Value, Is.GreaterThanOrEqualTo(2), "Host has unproportionate distribution: " + entry.Value); + // Assert.That(entry.Value, Is.LessThanOrEqualTo(30), "Host has unproportionate distribution: " + entry.Value); + // hostCount += entry.Value; + // } + + // Assert.That(hostCount, Is.EqualTo(noOfConcurrentClients), "Invalid no of clients used"); + //} + + //private static void UseClient(IRedisClientsManager manager, int clientNo, Dictionary hostCountMap) + //{ + // using (var client = manager.GetClient()) + // { + // lock (hostCountMap) + // { + // int hostCount; + // if (!hostCountMap.TryGetValue(client.Host, out hostCount)) + // { + // hostCount = 0; + // } + + // hostCountMap[client.Host] = ++hostCount; + // } + + // Debug.WriteLine(String.Format("Client '{0}' is using '{1}'", clientNo, client.Host)); + // } + //} + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.cs b/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.cs index ded0295a..d1a9f7cb 100644 --- a/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.cs +++ b/tests/ServiceStack.Redis.Tests/PooledRedisClientManagerTests.cs @@ -14,14 +14,14 @@ namespace ServiceStack.Redis.Tests [TestFixture, Category("Integration")] public class PooledRedisClientManagerTests { - [TestFixtureSetUp] - public void TestFixtureSetUp() + [OneTimeSetUp] + public void OneTimeSetUp() { RedisConfig.VerifyMasterConnections = false; } - [TestFixtureTearDown] - public void TestFixtureTearDown() + [OneTimeTearDown] + public void OneTimeTearDown() { RedisConfig.VerifyMasterConnections = true; } @@ -345,7 +345,7 @@ public void Does_block_ReadOnly_clients_pool() Thread.Sleep(delay + TimeSpan.FromSeconds(0.5)); client3.Dispose(); }; -#if NETCORE +#if NETCORE Task.Run(func); #else func.BeginInvoke(null, null); @@ -387,10 +387,10 @@ public void Does_throw_TimeoutException_when_PoolTimeout_exceeded() } catch (TimeoutException ex) { - Assert.That(ex.Message, Is.StringStarting("Redis Timeout expired.")); + Assert.That(ex.Message, Does.StartWith("Redis Timeout expired.")); } - var slaves = 4.Times(i => manager.GetReadOnlyClient()); + var replicas = 4.Times(i => manager.GetReadOnlyClient()); try { @@ -399,12 +399,12 @@ public void Does_throw_TimeoutException_when_PoolTimeout_exceeded() } catch (TimeoutException ex) { - Assert.That(ex.Message, Is.StringStarting("Redis Timeout expired.")); + Assert.That(ex.Message, Does.StartWith("Redis Timeout expired.")); } } } - //[Explicit,Ignore("tempromental integration test")] + //[Ignore("tempromental integration test")] //[Test] //public void Can_support_64_threads_using_the_client_simultaneously() //{ @@ -431,8 +431,8 @@ public void Does_throw_TimeoutException_when_PoolTimeout_exceeded() // var hostCount = 0; // 
foreach (var entry in clientUsageMap) // { - // Assert.That(entry.Value, Is.GreaterThanOrEqualTo(2), "Host has unproportianate distribution: " + entry.Value); - // Assert.That(entry.Value, Is.LessThanOrEqualTo(30), "Host has unproportianate distribution: " + entry.Value); + // Assert.That(entry.Value, Is.GreaterThanOrEqualTo(2), "Host has unproportionate distribution: " + entry.Value); + // Assert.That(entry.Value, Is.LessThanOrEqualTo(30), "Host has unproportionate distribution: " + entry.Value); // hostCount += entry.Value; // } diff --git a/tests/ServiceStack.Redis.Tests/Properties/AssemblyInfo.cs b/tests/ServiceStack.Redis.Tests/Properties/AssemblyInfo.cs index da860cde..157ff9b9 100644 --- a/tests/ServiceStack.Redis.Tests/Properties/AssemblyInfo.cs +++ b/tests/ServiceStack.Redis.Tests/Properties/AssemblyInfo.cs @@ -33,4 +33,4 @@ // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] [assembly: AssemblyVersion("1.0.0.0")] -[assembly: AssemblyFileVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.Async.cs new file mode 100644 index 00000000..53bab942 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.Async.cs @@ -0,0 +1,266 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Data; +using ServiceStack.Model; +using ServiceStack.Redis.Generic; +using ServiceStack.Script; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisBasicPersistenceProviderTestsAsync + : RedisClientTestsBaseAsync + { + List testModels; + + public static string TestModelIdsSetKey = "ids:" + typeof(TestModel).Name; + + public class TestModel + : IHasId + { + public Guid Id { get; set; } + public string Name { get; set; } + public int Age { get; set; } + + //Thanking R# for the timesaver + public bool Equals(TestModel other) + { + if (other is null) return false; + if (ReferenceEquals(this, other)) return true; + return other.Id.Equals(Id) && Equals(other.Name, Name) && other.Age == Age; + } + + public override bool Equals(object obj) + { + if (obj is null) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != typeof(TestModel)) return false; + return Equals((TestModel)obj); + } + + [SuppressMessage("Style", "IDE0070:Use 'System.HashCode'", Justification = "not in netfx")] + public override int GetHashCode() + { + unchecked + { + int result = Id.GetHashCode(); + result = (result * 397) ^ (Name != null ? 
Name.GetHashCode() : 0); + result = (result * 397) ^ Age; + return result; + } + } + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + + RedisRaw.NamespacePrefix = "RedisBasicPersistenceProviderTests"; + testModels = new List(); + 5.Times(i => testModels.Add( + new TestModel { Id = Guid.NewGuid(), Name = "Name" + i, Age = 20 + i })); + } + + [Test] + public async Task Can_Store() + { + foreach (var x in testModels) + { + await RedisAsync.StoreAsync(x); + } + + var allModels = (await RedisAsync.As().GetAllAsync()).OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EquivalentTo(testModels)); + } + + [Test] + public async Task Can_StoreAll() + { + await RedisAsync.StoreAllAsync(testModels); + + var allModels = (await RedisAsync.As().GetAllAsync()).OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EquivalentTo(testModels)); + } + + [Test] + public async Task Can_WriteAll() + { + await RedisAsync.WriteAllAsync(testModels); + + var testModelIds = testModels.ConvertAll(x => x.Id); + + var allModels = (await RedisAsync.GetByIdsAsync(testModelIds)) + .OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EquivalentTo(testModels)); + } + + [Test] + public async Task Can_GetById() + { + await RedisAsync.StoreAllAsync(testModels); + + var last = testModels.Last(); + var lastById = await RedisAsync.GetByIdAsync(last.Id); + + Assert.That(lastById, Is.EqualTo(last)); + } + + [Test] + public async Task Can_GetByIds() + { + await RedisAsync.StoreAllAsync(testModels); + + var evenTestModels = testModels.Where(x => x.Age % 2 == 0) + .OrderBy(x => x.Id).ToList(); + var evenTestModelIds = evenTestModels.Select(x => x.Id).ToList(); + + var selectedModels = (await RedisAsync.GetByIdsAsync(evenTestModelIds)) + .OrderBy(x => x.Id).ToList(); + + Assert.That(selectedModels, Is.EqualTo(evenTestModels)); + } + + [Test] + public async Task Can_Delete() + { + await RedisAsync.StoreAllAsync(testModels); + + var last = testModels.Last(); + await RedisAsync.DeleteAsync(last); + + testModels.Remove(last); + + var allModels = (await RedisAsync.As().GetAllAsync()).OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EquivalentTo(testModels)); + + //Test internal TestModelIdsSetKey state + var idsRemaining = (await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + TestModelIdsSetKey)) + .OrderBy(x => x).Map(x => new Guid(x)); + + var testModelIds = testModels.OrderBy(x => x.Id).Map(x => x.Id); + + Assert.That(idsRemaining, Is.EquivalentTo(testModelIds)); + } + + [Test] + public async Task Can_DeleteAll() + { + await RedisAsync.StoreAllAsync(testModels); + + await RedisAsync.DeleteAllAsync(); + + var allModels = await RedisAsync.As().GetAllAsync(); + + Assert.That(allModels, Is.Empty); + + //Test internal TestModelIdsSetKey state + var idsRemaining = await RedisAsync.GetAllItemsFromSetAsync(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public async Task Can_DeleteAll_with_runtime_type() + { + await RedisAsync.StoreAllAsync(testModels); + + var mi = typeof(IEntityStoreAsync).GetMethod(nameof(IEntityStoreAsync.DeleteAllAsync)); + var genericMi = mi.MakeGenericMethod(typeof(TestModel)); + await (Task)genericMi.Invoke(RedisAsync, new object[] { CancellationToken.None }); + + var allModels = await RedisAsync.As().GetAllAsync(); + Assert.That(allModels, Is.Empty); + var idsRemaining = await RedisAsync.GetAllItemsFromSetAsync(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public async Task 
Can_As_DeleteAll_with_runtime_type() + { + await RedisAsync.StoreAllAsync(testModels); + + var mi = typeof(IRedisClientAsync).GetMethod(nameof(IRedisClientAsync.As)); + var genericMi = mi.MakeGenericMethod(typeof(TestModel)); + var typedClient = genericMi.Invoke(RedisAsync, TypeConstants.EmptyObjectArray); + var deleteMi = typeof(IEntityStoreAsync).GetMethod(nameof(IEntityStoreAsync.DeleteAllAsync)); + await (Task)deleteMi.Invoke(typedClient, new object[] { CancellationToken.None }); + + var allModels = await RedisAsync.As().GetAllAsync(); + Assert.That(allModels, Is.Empty); + var idsRemaining = await RedisAsync.GetAllItemsFromSetAsync(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public async Task Can_As_DeleteAll_with_script() + { + await RedisAsync.StoreAllAsync(testModels); + + var context = new ScriptContext + { + ScriptLanguages = { ScriptLisp.Language }, + AllowScriptingOfAllTypes = true, + ScriptMethods = { + new ProtectedScripts() + }, + Args = { + ["redis"] = RedisAsync + } + }.Init(); + + var type = typeof(TestModel).FullName; +#if DEBUG + RedisRaw.DebugAllowSync = true; // not reasonable to allow async from Lisp +#endif + context.EvaluateCode($"redis.call('DeleteAll<{type}>') |> return"); + context.EvaluateCode($"redis.call('As<{type}>').call('DeleteAll') |> return"); + context.RenderLisp($"(call redis \"DeleteAll<{type}>\")"); + context.RenderLisp($"(call (call redis \"As<{type}>\") \"DeleteAll\")"); +#if DEBUG + RedisRaw.DebugAllowSync = false; +#endif + + var allModels = await RedisAsync.As().GetAllAsync(); + Assert.That(allModels, Is.Empty); + var idsRemaining = await RedisAsync.GetAllItemsFromSetAsync(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public async Task Can_DeleteByIds() + { + await RedisAsync.StoreAllAsync(testModels); + + var evenTestModels = testModels.Where(x => x.Age % 2 == 0) + .OrderBy(x => x.Id).ToList(); + var evenTestModelIds = evenTestModels.Select(x => x.Id).ToList(); + + await RedisAsync.DeleteByIdsAsync(evenTestModelIds); + + evenTestModels.ForEach(x => testModels.Remove(x)); + + var allModels = (await RedisAsync.As().GetAllAsync()).OrderBy(x => x.Age).ToList(); + + Assert.That(allModels, Is.EqualTo(testModels)); + + + //Test internal TestModelIdsSetKey state + var idsRemaining = (await RedisAsync.GetAllItemsFromSetAsync(RedisRaw.NamespacePrefix + TestModelIdsSetKey)) + .OrderBy(x => x).Map(x => new Guid(x)); + + var testModelIds = testModels.OrderBy(x => x.Id).Map(x => x.Id); + + Assert.That(idsRemaining, Is.EquivalentTo(testModelIds)); + } + + } +} diff --git a/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.cs b/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.cs index 484e82c4..86a10ff2 100644 --- a/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisBasicPersistenceProviderTests.cs @@ -3,6 +3,8 @@ using System.Linq; using NUnit.Framework; using ServiceStack.Model; +using ServiceStack.Redis.Generic; +using ServiceStack.Script; using ServiceStack.Text; namespace ServiceStack.Redis.Tests @@ -158,6 +160,66 @@ public void Can_DeleteAll() Assert.That(idsRemaining, Is.Empty); } + [Test] + public void Can_DeleteAll_with_runtime_type() + { + Redis.StoreAll(testModels); + + var mi = Redis.GetType().GetMethod(nameof(RedisClient.DeleteAll)); + var genericMi = mi.MakeGenericMethod(typeof(TestModel)); + genericMi.Invoke(Redis, TypeConstants.EmptyObjectArray); + + var allModels = Redis.GetAll(); + 
Assert.That(allModels, Is.Empty); + var idsRemaining = Redis.GetAllItemsFromSet(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public void Can_As_DeleteAll_with_runtime_type() + { + Redis.StoreAll(testModels); + + var mi = Redis.GetType().GetMethod(nameof(RedisClient.As)); + var genericMi = mi.MakeGenericMethod(typeof(TestModel)); + var typedClient = genericMi.Invoke(Redis, TypeConstants.EmptyObjectArray); + var deleteMi = typedClient.GetType().GetMethod(nameof(IRedisTypedClient.DeleteAll)); + deleteMi.Invoke(typedClient, TypeConstants.EmptyObjectArray); + + var allModels = Redis.GetAll(); + Assert.That(allModels, Is.Empty); + var idsRemaining = Redis.GetAllItemsFromSet(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + + [Test] + public void Can_As_DeleteAll_with_script() + { + Redis.StoreAll(testModels); + + var context = new ScriptContext { + ScriptLanguages = { ScriptLisp.Language }, + AllowScriptingOfAllTypes = true, + ScriptMethods = { + new ProtectedScripts() + }, + Args = { + ["redis"] = Redis + } + }.Init(); + + var type = typeof(TestModel).FullName; + context.EvaluateCode($"redis.call('DeleteAll<{type}>') |> return"); + context.EvaluateCode($"redis.call('As<{type}>').call('DeleteAll') |> return"); + context.RenderLisp($"(call redis \"DeleteAll<{type}>\")"); + context.RenderLisp($"(call (call redis \"As<{type}>\") \"DeleteAll\")"); + + var allModels = Redis.GetAll(); + Assert.That(allModels, Is.Empty); + var idsRemaining = Redis.GetAllItemsFromSet(TestModelIdsSetKey); + Assert.That(idsRemaining, Is.Empty); + } + [Test] public void Can_DeleteByIds() { diff --git a/tests/ServiceStack.Redis.Tests/RedisBatchTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisBatchTests.Async.cs new file mode 100644 index 00000000..3a3e1e82 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisBatchTests.Async.cs @@ -0,0 +1,46 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisBatchTestsAsync + : RedisClientTestsBaseAsync + { + public class Message + { + public long Id { get; set; } + public string Key { get; set; } + public string Value { get; set; } + public string Description { get; set; } + } + + [Test] + public async Task Store_batch_items_in_List() + { + var redisMessages = RedisAsync.As(); + const int batchSize = 500; + var nextIds = await redisMessages.GetNextSequenceAsync(batchSize); + + var msgBatch = batchSize.Times(i => + new Message + { + Id = nextIds - (batchSize - i) + 1, + Key = i.ToString(), + Value = Guid.NewGuid().ToString(), + Description = "Description" + }); + + await redisMessages.Lists["listName"].AddRangeAsync(msgBatch); + + var msgs = await redisMessages.Lists["listName"].GetAllAsync(); + Assert.That(msgs.Count, Is.EqualTo(batchSize)); + + Assert.That(msgs.First().Id, Is.EqualTo(1)); + Assert.That(msgs.Last().Id, Is.EqualTo(500)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisBenchmarkTests.cs b/tests/ServiceStack.Redis.Tests/RedisBenchmarkTests.cs index 073ada21..d4c70384 100644 --- a/tests/ServiceStack.Redis.Tests/RedisBenchmarkTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisBenchmarkTests.cs @@ -1,15 +1,13 @@ -#if !NETCORE_SUPPORT -using System; +using System; using System.Diagnostics; using NUnit.Framework; using ServiceStack.Logging; using ServiceStack.Redis; using ServiceStack.Text; - namespace ServiceStack.Redis.Tests { - [TestFixture, 
Explicit] + [TestFixture, Ignore("Benchmark")] public class RedisBenchmarkTests : RedisClientTestsBase { @@ -119,7 +117,7 @@ public void Compare_sort_nosort_to_smembers_mget() } } - [TestFixture, Explicit] + [TestFixture, Ignore("Benchmark")] public class RawBytesSetBenchmark { public void Run(string name, int nBlockSizeBytes, Action fn) @@ -161,16 +159,6 @@ public void Benchmark_SET_raw_bytes_8MB_ServiceStack() (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); } - [Test] - public void Benchmark_SET_raw_bytes_8MB_Sider() - { - // Create Redis Wrapper - var redis = new Sider.RedisClient(); - - Run("Sider 8MB", 8000000, - (i, bytes) => redis.SetRaw("eitan" + i.ToString(), bytes)); - } - [Test] public void Benchmark_SET_raw_bytes_1MB_ServiceStack() { @@ -180,16 +168,6 @@ public void Benchmark_SET_raw_bytes_1MB_ServiceStack() (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); } - [Test] - public void Benchmark_SET_raw_bytes_1MB_Sider() - { - // Create Redis Wrapper - var redis = new Sider.RedisClient(); - - Run("Sider 1MB", 1000000, - (i, bytes) => redis.SetRaw("eitan" + i.ToString(), bytes)); - } - [Test] public void Benchmark_SET_raw_bytes_100k_ServiceStack() { @@ -199,16 +177,6 @@ public void Benchmark_SET_raw_bytes_100k_ServiceStack() (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); } - [Test] - public void Benchmark_SET_raw_bytes_100k_Sider() - { - // Create Redis Wrapper - var redis = new Sider.RedisClient(); - - Run("Sider 100K", 100000, - (i, bytes) => redis.SetRaw("eitan" + i.ToString(), bytes)); - } - [Test] public void Benchmark_SET_raw_bytes_10k_ServiceStack() { @@ -218,16 +186,6 @@ public void Benchmark_SET_raw_bytes_10k_ServiceStack() (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); } - [Test] - public void Benchmark_SET_raw_bytes_10k_Sider() - { - // Create Redis Wrapper - var redis = new Sider.RedisClient(); - - Run("Sider 10K", 10000, - (i, bytes) => redis.SetRaw("eitan" + i.ToString(), bytes)); - } - [Test] public void Benchmark_SET_raw_bytes_1k_ServiceStack() { @@ -236,17 +194,6 @@ public void Benchmark_SET_raw_bytes_1k_ServiceStack() Run("ServiceStack.Redis 1K", 1000, (i, bytes) => redis.Set("eitan" + i.ToString(), bytes)); } - - [Test] - public void Benchmark_SET_raw_bytes_1k_Sider() - { - // Create Redis Wrapper - var redis = new Sider.RedisClient(); - - Run("Sider 1K", 1000, - (i, bytes) => redis.SetRaw("eitan" + i.ToString(), bytes)); - } } -} -#endif \ No newline at end of file +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.Async.cs new file mode 100644 index 00000000..4ee85060 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.Async.cs @@ -0,0 +1,139 @@ +using System; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Caching; +using ServiceStack.Common.Tests.Models; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + [Category("Async")] + public class RedisCacheClientTestsAsync + { + private ICacheClientAsync cacheClient; + + [SetUp] + public async Task OnBeforeEachTest() + { + if (cacheClient is object) + await cacheClient.DisposeAsync(); + + cacheClient = new RedisClient(TestConfig.SingleHost); + await cacheClient.FlushAllAsync(); + } + + [Test] + public async Task Get_non_existant_value_returns_null() + { + var model = ModelWithIdAndName.Create(1); + var cacheKey = model.CreateUrn(); + var existingModel = await cacheClient.GetAsync(cacheKey); + Assert.That(existingModel, 
Is.Null); + } + + [Test] + public async Task Get_non_existant_generic_value_returns_null() + { + var model = ModelWithIdAndName.Create(1); + var cacheKey = model.CreateUrn(); + var existingModel = await cacheClient.GetAsync(cacheKey); + Assert.That(existingModel, Is.Null); + } + + [Test] + public async Task Can_store_and_get_model() + { + var model = ModelWithIdAndName.Create(1); + var cacheKey = model.CreateUrn(); + await cacheClient.SetAsync(cacheKey, model); + + var existingModel = await cacheClient.GetAsync(cacheKey); + ModelWithIdAndName.AssertIsEqual(existingModel, model); + } + + [Test] + public async Task Can_store_null_model() + { + await cacheClient.SetAsync("test-key", null); + } + + [Test] + public async Task Can_Set_and_Get_key_with_all_byte_values() + { + const string key = "bytesKey"; + + var value = new byte[256]; + for (var i = 0; i < value.Length; i++) + { + value[i] = (byte)i; + } + + await cacheClient.SetAsync(key, value); + var resultValue = await cacheClient.GetAsync(key); + + Assert.That(resultValue, Is.EquivalentTo(value)); + } + + [Test] + public async Task Can_Replace_By_Pattern() + { + var model = ModelWithIdAndName.Create(1); + string modelKey = "model:" + model.CreateUrn(); + await cacheClient.AddAsync(modelKey, model); + + model = ModelWithIdAndName.Create(2); + string modelKey2 = "xxmodelxx:" + model.CreateUrn(); + await cacheClient.AddAsync(modelKey2, model); + + string s = "this is a string"; + await cacheClient.AddAsync("string1", s); + + var removable = (IRemoveByPatternAsync)cacheClient; + await removable.RemoveByPatternAsync("*model*"); + + ModelWithIdAndName result = await cacheClient.GetAsync(modelKey); + Assert.That(result, Is.Null); + + result = await cacheClient.GetAsync(modelKey2); + Assert.That(result, Is.Null); + + string result2 = await cacheClient.GetAsync("string1"); + Assert.That(result2, Is.EqualTo(s)); + + await removable.RemoveByPatternAsync("string*"); + + result2 = await cacheClient.GetAsync("string1"); + Assert.That(result2, Is.Null); + } + + [Test] + public async Task Can_GetTimeToLive() + { + var model = ModelWithIdAndName.Create(1); + string key = "model:" + model.CreateUrn(); + await cacheClient.AddAsync(key, model); + + var ttl = await cacheClient.GetTimeToLiveAsync(key); + Assert.That(ttl, Is.EqualTo(TimeSpan.MaxValue)); + + await cacheClient.SetAsync(key, model, expiresIn: TimeSpan.FromSeconds(10)); + ttl = await cacheClient.GetTimeToLiveAsync(key); + Assert.That(ttl.Value, Is.GreaterThanOrEqualTo(TimeSpan.FromSeconds(9))); + Assert.That(ttl.Value, Is.LessThanOrEqualTo(TimeSpan.FromSeconds(10))); + + await cacheClient.RemoveAsync(key); + ttl = await cacheClient.GetTimeToLiveAsync(key); + Assert.That(ttl, Is.Null); + } + + [Test] + public async Task Can_increment_and_reset_values() + { + await using var client = await new RedisManagerPool(TestConfig.SingleHost).GetCacheClientAsync(); + + Assert.That(await client.IncrementAsync("incr:counter", 10), Is.EqualTo(10)); + await client.SetAsync("incr:counter", 0); + Assert.That(await client.IncrementAsync("incr:counter", 10), Is.EqualTo(10)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.cs b/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.cs index ebb1052e..6817d7ec 100644 --- a/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisCacheClientTests.cs @@ -1,3 +1,5 @@ +#if !NETCORE //TODO: find out why fails to build in .netcoreapp1.1 + using System; using NUnit.Framework; using 
ServiceStack.Caching; @@ -72,7 +74,6 @@ public void Can_Set_and_Get_key_with_all_byte_values() Assert.That(resultValue, Is.EquivalentTo(value)); } -#if !NETCORE_SUPPORT [Test] public void Can_Replace_By_Pattern() { @@ -103,7 +104,6 @@ public void Can_Replace_By_Pattern() result2 = cacheClient.Get("string1"); Assert.That(result2, Is.Null); } -#endif [Test] public void Can_GetTimeToLive() @@ -124,5 +124,18 @@ public void Can_GetTimeToLive() ttl = cacheClient.GetTimeToLive(key); Assert.That(ttl, Is.Null); } + + [Test] + public void Can_increment_and_reset_values() + { + using (var client = new RedisManagerPool(TestConfig.SingleHost).GetCacheClient()) + { + Assert.That(client.Increment("incr:counter", 10), Is.EqualTo(10)); + client.Set("incr:counter", 0); + Assert.That(client.Increment("incr:counter", 10), Is.EqualTo(10)); + } + } } } + +#endif \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.Async.cs new file mode 100644 index 00000000..8dfe12ec --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.Async.cs @@ -0,0 +1,108 @@ +using System; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisClientConfigTestsAsync + : RedisClientTestsBaseAsync + { + [Ignore("Hurts MSOpenTech Redis Server")] + [Test] + public async Task Can_Set_and_Get_Config() + { + var orig = await RedisAsync.GetConfigAsync("maxmemory"); + var newMaxMemory = (long.Parse(orig) + 1).ToString(); + await RedisAsync.SetConfigAsync("maxmemory", newMaxMemory); + var current = await RedisAsync.GetConfigAsync("maxmemory"); + Assert.That(current, Is.EqualTo(newMaxMemory)); + } + + [Test] + public async Task Can_Rewrite_Redis_Config() + { + try + { + await RedisAsync.SaveConfigAsync(); + } + catch (RedisResponseException ex) + { + if (ex.Message.StartsWith("Rewriting config file: Permission denied") + || ex.Message.StartsWith("The server is running without a config file")) + return; + throw; + } + } + + [Test] + public async Task Can_Rewrite_Info_Stats() + { + await RedisAsync.ResetInfoStatsAsync(); + } + + [Test] + public async Task Can_set_and_Get_Client_Name() + { + var clientName = "CLIENT-" + Environment.TickCount; + await RedisAsync.SetClientAsync(clientName); + var client = await RedisAsync.GetClientAsync(); + + Assert.That(client, Is.EqualTo(clientName)); + } + + [Test] + public async Task Can_GetClientsInfo() + { + var clientList = await RedisAsync.GetClientsInfoAsync(); + clientList.PrintDump(); + } + + [Test] + public async Task Can_Kill_Client() + { + var clientList = await RedisAsync.GetClientsInfoAsync(); + var firstAddr = clientList.First()["addr"]; + await RedisAsync.KillClientAsync(firstAddr); + } + + [Test] + public async Task Can_Kill_Clients() + { + await RedisAsync.KillClientsAsync(fromAddress: "192.168.0.1:6379"); + await RedisAsync.KillClientsAsync(withId: "1"); + await RedisAsync.KillClientsAsync(ofType: RedisClientType.Normal); + await RedisAsync.KillClientsAsync(ofType: RedisClientType.PubSub); + await RedisAsync.KillClientsAsync(ofType: RedisClientType.Slave); + await RedisAsync.KillClientsAsync(skipMe: true); + await RedisAsync.KillClientsAsync(fromAddress: "192.168.0.1:6379", withId: "1", ofType: RedisClientType.Normal); + await RedisAsync.KillClientsAsync(skipMe: false); + } + + [Test] + public async Task Can_get_Role_Info() + { + var result = await 
NativeAsync.RoleAsync(); + result.PrintDump(); + Assert.That(result.Children[0].Text, Is.EqualTo("master")); + Assert.That(await RedisAsync.GetServerRoleAsync(), Is.EqualTo(RedisServerRole.Master)); + + //needs redis-server v3.0 + //var replica = new RedisClient("10.0.0.9:6380"); + //result = replica.Role(); + //result.PrintDump(); + } + + [Test] + public Task Can_PauseAllClients() + { + //needs redis-server v3.0 + //var replica = new RedisClient("10.0.0.9:6380"); + //replica.PauseAllClients(TimeSpan.FromSeconds(2)); + + return Task.CompletedTask; + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.cs index 39475c8a..44b6b4f1 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientConfigTests.cs @@ -9,7 +9,7 @@ namespace ServiceStack.Redis.Tests public class RedisClientConfigTests : RedisClientTestsBase { - [Explicit("Hurts MSOpenTech Redis Server")] + [Ignore("Hurts MSOpenTech Redis Server")] [Test] public void Can_Set_and_Get_Config() { @@ -29,7 +29,8 @@ public void Can_Rewrite_Redis_Config() } catch (RedisResponseException ex) { - if (ex.Message.StartsWith("Rewriting config file: Permission denied")) + if (ex.Message.StartsWith("Rewriting config file: Permission denied") + || ex.Message.StartsWith("The server is running without a config file")) return; throw; } @@ -88,8 +89,8 @@ public void Can_get_Role_Info() Assert.That(Redis.GetServerRole(), Is.EqualTo(RedisServerRole.Master)); //needs redis-server v3.0 - //var slave = new RedisClient("10.0.0.9:6380"); - //result = slave.Role(); + //var replica = new RedisClient("10.0.0.9:6380"); + //result = replica.Role(); //result.PrintDump(); } @@ -97,8 +98,8 @@ public void Can_get_Role_Info() public void Can_PauseAllClients() { //needs redis-server v3.0 - //var slave = new RedisClient("10.0.0.9:6380"); - //slave.PauseAllClients(TimeSpan.FromSeconds(2)); + //var replica = new RedisClient("10.0.0.9:6380"); + //replica.PauseAllClients(TimeSpan.FromSeconds(2)); } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.Async.cs new file mode 100644 index 00000000..8a92f078 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.Async.cs @@ -0,0 +1,200 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Common; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisClientEvalTestsAsync : RedisClientTestsBaseAsync + { + public override void OnBeforeEachTest() + { + //base.OnBeforeEachTest(); + + //Run on local build server + RedisRaw = new RedisClient(TestConfig.SingleHost); + RedisRaw.FlushAll(); + } + + [Test] + public async Task Can_Eval_int() + { + var intVal = await RedisAsync.ExecLuaAsIntAsync("return 3141591", Array.Empty()); + Assert.That(intVal, Is.EqualTo(3141591)); + } + + [Test] + public async Task Can_EvalSha_int() + { + var luaBody = "return 3141591"; + await RedisAsync.ExecLuaAsIntAsync(luaBody, Array.Empty()); + var sha1 = await RedisAsync.CalculateSha1Async(luaBody); + var intVal = await RedisAsync.ExecLuaShaAsIntAsync(sha1, Array.Empty()); + Assert.That(intVal, Is.EqualTo(3141591)); + } + + [Test] + public async Task Can_Eval_int_with_args() + { + var intVal = await 
RedisAsync.ExecLuaAsIntAsync("return 3141591", new[] { "20", "30", "40" }); + Assert.That(intVal, Is.EqualTo(3141591)); + } + + [Test] + public async Task Can_Eval_int_with_keys_and_args() + { + var intVal = await RedisAsync.ExecLuaAsIntAsync("return KEYS[1] + ARGV[1]", new[] { "20" }, new[] { "30", "40" }); + Assert.That(intVal, Is.EqualTo(50)); + } + + [Test] + public async Task Can_Eval_int2() + { + var intVal = await RedisAsync.ExecLuaAsIntAsync("return ARGV[1] + ARGV[2]", new[] { "10", "20" }); + Assert.That(intVal, Is.EqualTo(30)); + } + + [Test] + public async Task Can_Eval_string() + { + var strVal = await RedisAsync.ExecLuaAsStringAsync(@"return 'abc'", new string[0]); + Assert.That(strVal, Is.EqualTo("abc")); + } + + [Test] + public async Task Can_Eval_HelloWorld_string() + { + var strVal = await RedisAsync.ExecLuaAsStringAsync(@"return 'Hello, ' .. ARGV[1] .. '!'", new[] { "Redis Lua" }); + Assert.That(strVal, Is.EqualTo("Hello, Redis Lua!")); + } + + [Test] + public async Task Can_Eval_string_with_args() + { + var strVal = await RedisAsync.ExecLuaAsStringAsync(@"return 'abc'", new[] { "at", "dot", "com" }); + Assert.That(strVal, Is.EqualTo("abc")); + } + + [Test] + public async Task Can_Eval_string_with_keys_an_args() + { + var strVal = await RedisAsync.ExecLuaAsStringAsync(@"return KEYS[1] .. ARGV[1]", new[] { "at" }, new[] { "dot", "com" }); + Assert.That(strVal, Is.EqualTo("atdot")); + } + + [Test] + public async Task Can_Eval_multidata_with_args() + { + var strVals = await RedisAsync.ExecLuaAsListAsync(@"return {ARGV[1],ARGV[2],ARGV[3]}", new[] { "at", "dot", "com" }); + Assert.That(strVals, Is.EquivalentTo(new List { "at", "dot", "com" })); + } + + [Test] + public async Task Can_Eval_multidata_with_keys_and_args() + { + var strVals = await RedisAsync.ExecLuaAsListAsync(@"return {KEYS[1],ARGV[1],ARGV[2]}", new[] { "at" }, new[] { "dot", "com" }); + Assert.That(strVals, Is.EquivalentTo(new List { "at", "dot", "com" })); + } + + [Test] + public async Task Can_Load_and_Exec_script() + { + var luaBody = "return 'load script and exec'"; + var sha1 = await RedisAsync.LoadLuaScriptAsync(luaBody); + var result = await RedisAsync.ExecLuaShaAsStringAsync(sha1, new string[0]); + Assert.That(result, Is.EqualTo("load script and exec")); + } + + [Test] + public async Task Does_flush_all_scripts() + { + var luaBody = "return 'load script and exec'"; + var sha1 = await RedisAsync.LoadLuaScriptAsync(luaBody); + var result = await RedisAsync.ExecLuaShaAsStringAsync(sha1, new string[0]); + Assert.That(result, Is.EqualTo("load script and exec")); + + await RedisAsync.RemoveAllLuaScriptsAsync(); + + try + { + result = await RedisAsync.ExecLuaShaAsStringAsync(sha1, new string[0]); + Assert.Fail("script shouldn't exist"); + } + catch (RedisResponseException ex) + { + Assert.That(ex.Message, Does.Contain("NOSCRIPT")); + } + } + + [Test] + public async Task Can_detect_which_scripts_exist() + { + var sha1 = await RedisAsync.LoadLuaScriptAsync("return 'script1'"); + var sha2 = await RedisAsync.CalculateSha1Async("return 'script2'"); + var sha3 = await RedisAsync.LoadLuaScriptAsync("return 'script3'"); + + Assert.That(await RedisAsync.HasLuaScriptAsync(sha1)); + + var existsMap = await RedisAsync.WhichLuaScriptsExistsAsync(new[] { sha1, sha2, sha3 }); + Assert.That(existsMap[sha1]); + Assert.That(!existsMap[sha2]); + Assert.That(existsMap[sha3]); + } + + [Test] + public async Task Can_create_ZPop_with_lua() + { + var luaBody = @" + local val = redis.call('zrange', KEYS[1], 0, ARGV[1]-1) + if val then 
redis.call('zremrangebyrank', KEYS[1], 0, ARGV[1]-1) end + return val"; + + var i = 0; + var alphabet = 26.Times(c => ((char)('A' + c)).ToString()); + foreach (var x in alphabet) + { + await RedisAsync.AddItemToSortedSetAsync("zalphabet", x, i++); + } + + var letters = await RedisAsync.ExecLuaAsListAsync(luaBody, keys: new[] { "zalphabet" }, args: new[] { "3" }); + + letters.PrintDump(); + Assert.That(letters, Is.EquivalentTo(new[] { "A", "B", "C" })); + } + + [Test] + public async Task Can_create_ZRevPop_with_lua() + { + var luaBody = @" + local val = redis.call('zrange', KEYS[1], -ARGV[1], -1) + if val then redis.call('zremrangebyrank', KEYS[1], -ARGV[1], -1) end + return val"; + + var i = 0; + var alphabet = 26.Times(c => ((char)('A' + c)).ToString()); + foreach(var x in alphabet) + { + await RedisAsync.AddItemToSortedSetAsync("zalphabet", x, i++); + } + + var letters = await RedisAsync.ExecLuaAsListAsync(luaBody, keys: new[] { "zalphabet" }, args: new[] { "3" }); + + letters.PrintDump(); + Assert.That(letters, Is.EquivalentTo(new[] { "X", "Y", "Z" })); + } + + [Test] + public async Task Can_return_DaysOfWeek_as_list() + { + foreach(var x in Enum.GetNames(typeof(DayOfWeek)).ToList()) + { + await RedisAsync.AddItemToListAsync("DaysOfWeek", x); + } + (await RedisAsync.ExecLuaAsListAsync("return redis.call('LRANGE', 'DaysOfWeek', 0, -1)", new string[0])).PrintDump(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs index a7407963..b31ff348 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientEvalTests.cs @@ -125,7 +125,7 @@ public void Does_flush_all_scripts() } catch (RedisResponseException ex) { - Assert.That(ex.Message, Is.StringContaining("NOSCRIPT")); + Assert.That(ex.Message, Does.Contain("NOSCRIPT")); } } diff --git a/tests/ServiceStack.Redis.Tests/RedisClientHashTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientHashTests.Async.cs new file mode 100644 index 00000000..f95f9376 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientHashTests.Async.cs @@ -0,0 +1,351 @@ +using NUnit.Framework; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisClientHashTestsAsync + : RedisClientTestsBaseAsync + { + private const string HashId = "rchtesthash"; + + Dictionary<string, string> stringMap; + Dictionary<string, int> stringIntMap; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + stringMap = new Dictionary<string, string> { + {"one","a"}, {"two","b"}, {"three","c"}, {"four","d"} + }; + stringIntMap = new Dictionary<string, int> { + {"one",1}, {"two",2}, {"three",3}, {"four",4} + }; + } + + public override void OnAfterEachTest() + { + CleanMask = HashId + "*"; + base.OnAfterEachTest(); + } + + [Test] + public async Task Can_SetItemInHash_and_GetAllFromHash() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_RemoveFromHash() + { + const string removeMember = "two"; + + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + await RedisAsync.RemoveEntryFromHashAsync(HashId, removeMember); + + stringMap.Remove(removeMember); + + var members =
await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_GetItemFromHash() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var hashValue = await RedisAsync.GetValueFromHashAsync(HashId, "two"); + + Assert.That(hashValue, Is.EqualTo(stringMap["two"])); + } + + [Test] + public async Task Can_GetHashCount() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var hashCount = await RedisAsync.GetHashCountAsync(HashId); + + Assert.That(hashCount, Is.EqualTo(stringMap.Count)); + } + + [Test] + public async Task Does_HashContainsKey() + { + const string existingMember = "two"; + const string nonExistingMember = "five"; + + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + Assert.That(await RedisAsync.HashContainsEntryAsync(HashId, existingMember), Is.True); + Assert.That(await RedisAsync.HashContainsEntryAsync(HashId, nonExistingMember), Is.False); + } + + [Test] + public async Task Can_GetHashKeys() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + var expectedKeys = stringMap.Map(x => x.Key); + + var hashKeys = await RedisAsync.GetHashKeysAsync(HashId); + + Assert.That(hashKeys, Is.EquivalentTo(expectedKeys)); + } + + [Test] + public async Task Can_GetHashValues() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + var expectedValues = stringMap.Map(x => x.Value); + + var hashValues = await RedisAsync.GetHashValuesAsync(HashId); + + Assert.That(hashValues, Is.EquivalentTo(expectedValues)); + } + + [Test] + public async Task Can_enumerate_small_IDictionary_Hash() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var members = new List(); + await foreach (var item in RedisAsync.Hashes[HashId]) + { + Assert.That(stringMap.ContainsKey(item.Key), Is.True); + members.Add(item.Key); + } + Assert.That(members.Count, Is.EqualTo(stringMap.Count)); + } + + [Test] + public async Task Can_Add_to_IDictionary_Hash() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringMap) + { + await hash.AddAsync(x); + } + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_Clear_IDictionary_Hash() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringMap) + { + await hash.AddAsync(x); + } + + Assert.That(await hash.CountAsync(), Is.EqualTo(stringMap.Count)); + + await hash.ClearAsync(); + + Assert.That(await hash.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_IDictionary_Hash() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringMap) + { + await hash.AddAsync(x); + } + + Assert.That(await hash.ContainsKeyAsync("two"), Is.True); + Assert.That(await hash.ContainsKeyAsync("five"), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_IDictionary_Hash() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringMap) + { + await hash.AddAsync(x); + } + + stringMap.Remove("two"); + await hash.RemoveAsync("two"); + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + private static Dictionary 
ToStringMap(Dictionary stringIntMap) + { + var map = new Dictionary(); + foreach (var kvp in stringIntMap) + { + map[kvp.Key] = kvp.Value.ToString(); + } + return map; + } + + [Test] + public async Task Can_increment_Hash_field() + { + var hash = RedisAsync.Hashes[HashId]; + foreach (var x in stringIntMap) + { + await hash.AddAsync(x.Key, x.Value.ToString()); + } + + stringIntMap["two"] += 10; + await RedisAsync.IncrementValueInHashAsync(HashId, "two", 10); + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(ToStringMap(stringIntMap))); + } + + [Test] + public async Task Can_increment_Hash_field_beyond_32_bits() + { + await RedisAsync.SetEntryInHashAsync(HashId, "int", Int32.MaxValue.ToString()); + await RedisAsync.IncrementValueInHashAsync(HashId, "int", 1); + long actual = Int64.Parse(await RedisAsync.GetValueFromHashAsync(HashId, "int")); + long expected = Int32.MaxValue + 1L; + Assert.That(actual, Is.EqualTo(expected)); + } + + [Test] + public async Task Can_SetItemInHashIfNotExists() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + await RedisAsync.SetEntryInHashIfNotExistsAsync(HashId, "two", "did not change existing item"); + await RedisAsync.SetEntryInHashIfNotExistsAsync(HashId, "five", "changed non existing item"); + stringMap["five"] = "changed non existing item"; + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_SetRangeInHash() + { + var newStringMap = new Dictionary { + {"five","e"}, {"six","f"}, {"seven","g"} + }; + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + await RedisAsync.SetRangeInHashAsync(HashId, newStringMap); + + newStringMap.Each(x => stringMap.Add(x.Key, x.Value)); + + var members = await RedisAsync.GetAllEntriesFromHashAsync(HashId); + Assert.That(members, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_GetItemsFromHash() + { + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashId, x.Key, x.Value); + } + + var expectedValues = new List { stringMap["one"], stringMap["two"], null }; + var hashValues = await RedisAsync.GetValuesFromHashAsync(HashId, new[] { "one", "two", "not-exists" }); + + Assert.That(hashValues.EquivalentTo(expectedValues), Is.True); + } + [Test] + public async Task Can_hash_set() + { + var key = HashId + "key"; + var field = GetBytes("foo"); + var value = GetBytes("value"); + Assert.AreEqual(await NativeAsync.HDelAsync(key, field), 0); + Assert.AreEqual(await NativeAsync.HSetAsync(key, field, value), 1); + Assert.AreEqual(await NativeAsync.HDelAsync(key, field), 1); + } + + [Test] + public async Task Can_hash_multi_set_and_get() + { + const string Key = HashId + "multitest"; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + var fields = new Dictionary { { "field1", "1" }, { "field2", "2" }, { "field3", "3" } }; + + await RedisAsync.SetRangeInHashAsync(Key, fields); + var members = await RedisAsync.GetAllEntriesFromHashAsync(Key); + foreach (var member in members) + { + Assert.IsTrue(fields.ContainsKey(member.Key)); + Assert.AreEqual(fields[member.Key], member.Value); + } + } + + public class HashTest + { + public int Id { get; set; } + public string Name { get; set; } + } + + [Test] + public async Task Can_store_as_Hash() + { + var dto = new HashTest { Id = 1 }; + await 
RedisAsync.StoreAsHashAsync(dto); + + var storedHash = await RedisAsync.GetHashKeysAsync(dto.ToUrn()); + Assert.That(storedHash, Is.EquivalentTo(new[] { "Id" })); + + var hold = RedisClient.ConvertToHashFn; + RedisClient.ConvertToHashFn = o => + { + var map = new Dictionary<string, string>(); + o.ToObjectDictionary().Each(x => map[x.Key] = (x.Value ?? "").ToJsv()); + return map; + }; + + await RedisAsync.StoreAsHashAsync(dto); + storedHash = await RedisAsync.GetHashKeysAsync(dto.ToUrn()); + Assert.That(storedHash, Is.EquivalentTo(new[] { "Id", "Name" })); + + RedisClient.ConvertToHashFn = hold; + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientListTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientListTests.Async.cs new file mode 100644 index 00000000..b68445a2 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientListTests.Async.cs @@ -0,0 +1,502 @@ +using System; +using System.Collections.Generic; +using NUnit.Framework; +using System.Linq; +using ServiceStack.Text; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisClientListTestsAsync + : RedisClientTestsBaseAsync + { + const string ListId = "rcl_testlist"; + const string ListId2 = "rcl_testlist2"; + private List<string> storeMembers; + + public RedisClientListTestsAsync() + { + CleanMask = "rcl_testlist*"; + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + storeMembers = new List<string> { "one", "two", "three", "four" }; + } + + private static void AssertAreEqual(List<string> actualList, List<string> expectedList) + { + Assert.That(actualList, Has.Count.EqualTo(expectedList.Count)); + var i = 0; + actualList.ForEach(x => Assert.That(x, Is.EqualTo(expectedList[i++]))); + } + + private static void AssertAreEqual(List<string> actualList, Queue<string> expectedList) + { + Assert.That(actualList, Has.Count.EqualTo(expectedList.Count)); + actualList.ForEach(x => Assert.That(x, Is.EqualTo(expectedList.Dequeue()))); + } + + [Test] + public async Task Can_PopAndPushItemBetweenLists() + { + await RedisAsync.AddItemToListAsync(ListId, "1"); + await RedisAsync.PopAndPushItemBetweenListsAsync(ListId, ListId2); + } + + [Test] + public async Task Can_BlockingPopAndPushItemBetweenLists() + { + await RedisAsync.AddItemToListAsync(ListId, "A"); + await RedisAsync.AddItemToListAsync(ListId, "B"); + var r = await RedisAsync.BlockingPopAndPushItemBetweenListsAsync(ListId, ListId2, new TimeSpan(0, 0, 1)); + + Assert.That(r, Is.EqualTo("B")); + } + + [Test] + public async Task Can_Timeout_BlockingPopAndPushItemBetweenLists() + { + var r = await RedisAsync.BlockingPopAndPushItemBetweenListsAsync(ListId, ListId2, new TimeSpan(0, 0, 1)); + Assert.That(r, Is.Null); + } + + [Test] + public async Task Can_AddToList_and_GetAllFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var members = await RedisAsync.GetAllItemsFromListAsync(ListId); + + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_AddRangeToList_and_GetAllFromList() + { + await RedisAsync.AddRangeToListAsync(ListId, storeMembers); + + var members = await RedisAsync.GetAllItemsFromListAsync(ListId); + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_PrependRangeToList_and_GetAllFromList() + { + await RedisAsync.PrependRangeToListAsync(ListId, storeMembers); + + var members = await RedisAsync.GetAllItemsFromListAsync(ListId); + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task
Can_GetListCount() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var listCount = await RedisAsync.GetListCountAsync(ListId); + + Assert.That(listCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Can_GetItemFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var storeMember3 = storeMembers[2]; + var item3 = await RedisAsync.GetItemFromListAsync(ListId, 2); + + Assert.That(item3, Is.EqualTo(storeMember3)); + } + + [Test] + public async Task Can_SetItemInList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + storeMembers[2] = "five"; + await RedisAsync.SetItemInListAsync(ListId, 2, "five"); + + var members = await RedisAsync.GetAllItemsFromListAsync(ListId); + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_PopFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var item4 = await RedisAsync.PopItemFromListAsync(ListId); + + Assert.That(item4, Is.EqualTo("four")); + } + + [Test] + public async Task Can_EnqueueOnList() + { + var queue = new Queue(); + storeMembers.ForEach(queue.Enqueue); + foreach (var x in storeMembers) + { + await RedisAsync.EnqueueItemOnListAsync(ListId, x); + } + + while (queue.Count > 0) + { + var actual = await RedisAsync.DequeueItemFromListAsync(ListId); + Assert.That(actual, Is.EqualTo(queue.Dequeue())); + } + } + + [Test] + public async Task Can_DequeueFromList() + { + var queue = new Queue(); + storeMembers.ForEach(queue.Enqueue); + foreach (var x in storeMembers) + { + await RedisAsync.EnqueueItemOnListAsync(ListId, x); + } + + var item1 = await RedisAsync.DequeueItemFromListAsync(ListId); + + Assert.That(item1, Is.EqualTo(queue.Dequeue())); + } + + [Test] + public async Task PopAndPushSameAsDequeue() + { + var queue = new Queue(); + storeMembers.ForEach(queue.Enqueue); + foreach (var x in storeMembers) + { + await RedisAsync.EnqueueItemOnListAsync(ListId, x); + } + + var item1 = await RedisAsync.PopAndPushItemBetweenListsAsync(ListId, ListId2); + Assert.That(item1, Is.EqualTo(queue.Dequeue())); + } + + [Test] + public async Task Can_BlockingDequeueFromList() + { + var queue = new Queue(); + storeMembers.ForEach(queue.Enqueue); + foreach (var x in storeMembers) + { + await RedisAsync.EnqueueItemOnListAsync(ListId, x); + } + + var item1 = await RedisAsync.BlockingDequeueItemFromListAsync(ListId, null); + + Assert.That(item1, Is.EqualTo(queue.Dequeue())); + } + + [Test] + public async Task BlockingDequeueFromList_Can_TimeOut() + { + var item1 = await RedisAsync.BlockingDequeueItemFromListAsync(ListId, TimeSpan.FromSeconds(1)); + Assert.That(item1, Is.Null); + } + + [Test] + public async Task Can_PushToList() + { + var stack = new Stack(); + storeMembers.ForEach(stack.Push); + foreach (var x in storeMembers) + { + await RedisAsync.PushItemToListAsync(ListId, x); + } + + while (stack.Count > 0) + { + var actual = await RedisAsync.PopItemFromListAsync(ListId); + Assert.That(actual, Is.EqualTo(stack.Pop())); + } + } + + [Test] + public async Task Can_BlockingPopFromList() + { + var stack = new Stack(); + storeMembers.ForEach(stack.Push); + foreach (var x in storeMembers) + { + await RedisAsync.PushItemToListAsync(ListId, x); + } + + var item1 = await RedisAsync.BlockingPopItemFromListAsync(ListId, null); + + Assert.That(item1, Is.EqualTo(stack.Pop())); + } + + [Test] + public async Task 
BlockingPopFromList_Can_TimeOut() + { + var item1 = await RedisAsync.BlockingPopItemFromListAsync(ListId, TimeSpan.FromSeconds(1)); + Assert.That(item1, Is.Null); + } + + [Test] + public async Task Can_RemoveStartFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var item1 = await RedisAsync.RemoveStartFromListAsync(ListId); + + Assert.That(item1, Is.EqualTo(storeMembers.First())); + } + + [Test] + public async Task Can_RemoveEndFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var item1 = await RedisAsync.RemoveEndFromListAsync(ListId); + + Assert.That(item1, Is.EqualTo(storeMembers.Last())); + } + + [Test] + public async Task Can_BlockingRemoveStartFromList() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var item1 = await RedisAsync.BlockingRemoveStartFromListAsync(ListId, null); + + Assert.That(item1, Is.EqualTo(storeMembers.First())); + } + + [Test] + public async Task Can_MoveBetweenLists() + { + var list1Members = new List { "one", "two", "three", "four" }; + var list2Members = new List { "five", "six", "seven" }; + const string item4 = "four"; + + foreach (var x in list1Members) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + foreach (var x in list2Members) + { + await RedisAsync.AddItemToListAsync(ListId2, x); + } + + list1Members.Remove(item4); + list2Members.Insert(0, item4); + await RedisAsync.PopAndPushItemBetweenListsAsync(ListId, ListId2); + + var readList1 = await RedisAsync.GetAllItemsFromListAsync(ListId); + var readList2 = await RedisAsync.GetAllItemsFromListAsync(ListId2); + + AssertAreEqual(readList1, list1Members); + AssertAreEqual(readList2, list2Members); + } + + + [Test] + public async Task Can_enumerate_small_list() + { + foreach (var x in storeMembers) + { + await RedisAsync.AddItemToListAsync(ListId, x); + } + + var readMembers = new List(); + await foreach (var item in RedisAsync.Lists[ListId]) + { + readMembers.Add(item); + } + AssertAreEqual(readMembers, storeMembers); + } + + [Test] + public async Task Can_enumerate_large_list() + { + if (TestConfig.IgnoreLongTests) return; + + const int listSize = 2500; + + storeMembers = new List(); + for (int x = 0; x < listSize; x++) + { + await RedisAsync.AddItemToListAsync(ListId, x.ToString()); + storeMembers.Add(x.ToString()); + } + + var members = new List(); + await foreach (var item in RedisAsync.Lists[ListId]) + { + members.Add(item); + } + members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Add_to_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + var members = await ToListAsync(list); + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_Clear_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + Assert.That(await list.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await list.ClearAsync(); + + Assert.That(await list.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + Assert.That(await list.ContainsAsync("two"), Is.True); + Assert.That(await 
list.ContainsAsync("five"), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + storeMembers.Remove("two"); + await list.RemoveAsync("two"); + + var members = await ToListAsync(list); + + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_RemoveAt_value_from_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + storeMembers.RemoveAt(2); + await list.RemoveAtAsync(2); + + var members = await ToListAsync(list); + + AssertAreEqual(members, storeMembers); + } + + [Test] + public async Task Can_get_default_index_from_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + for (var i = 0; i < storeMembers.Count; i++) + { + Assert.That(await list.ElementAtAsync(i), Is.EqualTo(storeMembers[i])); + } + } + + [Test] + public async Task Can_test_for_IndexOf_in_IList() + { + var list = RedisAsync.Lists[ListId]; + foreach (var x in storeMembers) + { + await list.AddAsync(x); + } + + foreach (var item in storeMembers) + { + Assert.That(await list.IndexOfAsync(item), Is.EqualTo(storeMembers.IndexOf(item))); + } + } + + [Test] + public async Task Can_AddRangeToList_and_GetSortedItems() + { + await RedisAsync.PrependRangeToListAsync(ListId, storeMembers); + + var members = await RedisAsync.GetSortedItemsFromListAsync(ListId, new SortOptions { SortAlpha = true, SortDesc = true, Skip = 1, Take = 2 }); + AssertAreEqual(members, storeMembers.OrderByDescending(s => s).Skip(1).Take(2).ToList()); + } + + public class Test + { + public string A { get; set; } + } + + [Test] + public async Task RemoveAll_removes_all_items_from_Named_List() + { + var redis = RedisAsync.As(); + + var clientesRepo = redis.Lists["repo:Client:Test"]; + + Assert.IsTrue(await clientesRepo.CountAsync() == 0, "Count 1 = " + await clientesRepo.CountAsync()); + await clientesRepo.AddAsync(new Test() { A = "Test" }); + Assert.IsTrue(await clientesRepo.CountAsync() == 1, "Count 2 = " + await clientesRepo.CountAsync()); + await clientesRepo.RemoveAllAsync(); + Assert.IsTrue(await clientesRepo.CountAsync() == 0, "Count 3 = " + await clientesRepo.CountAsync()); + } + + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientListTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientListTests.cs index d3c07bd4..e7003a4b 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientListTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientListTests.cs @@ -35,7 +35,6 @@ private static void AssertAreEqual(List actualList, List expecte private static void AssertAreEqual(List actualList, Queue expectedList) { Assert.That(actualList, Has.Count.EqualTo(expectedList.Count)); - var i = 0; actualList.ForEach(x => Assert.That(x, Is.EqualTo(expectedList.Dequeue()))); } diff --git a/tests/ServiceStack.Redis.Tests/RedisClientSetTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientSetTests.Async.cs new file mode 100644 index 00000000..ad23cc5a --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientSetTests.Async.cs @@ -0,0 +1,335 @@ +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisClientSetTestsAsync + : RedisClientTestsBaseAsync + { + private const string 
SetIdSuffix = "testset"; + private List storeMembers; + + private string SetId + { + get + { + return this.PrefixedKey(SetIdSuffix); + } + } + + [SetUp] + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = "RedisClientSetTests"; + storeMembers = new List { "one", "two", "three", "four" }; + } + + [Test] + public async Task Can_AddToSet_and_GetAllFromSet() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var members = await RedisAsync.GetAllItemsFromSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_AddRangeToSet_and_GetAllFromSet() + { + await RedisAsync.AddRangeToSetAsync(SetId, storeMembers); + + var members = await RedisAsync.GetAllItemsFromSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_RemoveFromSet() + { + const string removeMember = "two"; + + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + await RedisAsync.RemoveItemFromSetAsync(SetId, removeMember); + + storeMembers.Remove(removeMember); + + var members = await RedisAsync.GetAllItemsFromSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_PopFromSet() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var member = await RedisAsync.PopItemFromSetAsync(SetId); + + Assert.That(storeMembers.Contains(member), Is.True); + } + + [Test] + public async Task Can_MoveBetweenSets() + { + string fromSetId = PrefixedKey("testmovefromset"); + string toSetId = PrefixedKey("testmovetoset"); + const string moveMember = "four"; + var fromSetIdMembers = new List { "one", "two", "three", "four" }; + var toSetIdMembers = new List { "five", "six", "seven" }; + + await fromSetIdMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(fromSetId, x)); + await toSetIdMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(toSetId, x)); + + await RedisAsync.MoveBetweenSetsAsync(fromSetId, toSetId, moveMember); + + fromSetIdMembers.Remove(moveMember); + toSetIdMembers.Add(moveMember); + + var readFromSetId = await RedisAsync.GetAllItemsFromSetAsync(fromSetId); + var readToSetId = await RedisAsync.GetAllItemsFromSetAsync(toSetId); + + Assert.That(readFromSetId, Is.EquivalentTo(fromSetIdMembers)); + Assert.That(readToSetId, Is.EquivalentTo(toSetIdMembers)); + } + + [Test] + public async Task Can_GetSetCount() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var setCount = await RedisAsync.GetSetCountAsync(SetId); + + Assert.That(setCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Does_SetContainsValue() + { + const string existingMember = "two"; + const string nonExistingMember = "five"; + + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + Assert.That(await RedisAsync.SetContainsItemAsync(SetId, existingMember), Is.True); + Assert.That(await RedisAsync.SetContainsItemAsync(SetId, nonExistingMember), Is.False); + } + + [Test] + public async Task Can_IntersectBetweenSets() + { + string set1Name = PrefixedKey("testintersectset1"); + string set2Name = PrefixedKey("testintersectset2"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => 
RedisAsync.AddItemToSetAsync(set2Name, x)); + + var intersectingMembers = await RedisAsync.GetIntersectFromSetsAsync(new[] { set1Name, set2Name }); + + Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); + } + + [Test] + public async Task Can_Store_IntersectBetweenSets() + { + string set1Name = PrefixedKey("testintersectset1"); + string set2Name = PrefixedKey("testintersectset2"); + string storeSetName = PrefixedKey("testintersectsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + + await RedisAsync.StoreIntersectFromSetsAsync(storeSetName, new[] { set1Name, set2Name }); + + var intersectingMembers = await RedisAsync.GetAllItemsFromSetAsync(storeSetName); + + Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); + } + + [Test] + public async Task Can_UnionBetweenSets() + { + string set1Name = PrefixedKey("testunionset1"); + string set2Name = PrefixedKey("testunionset2"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + + var unionMembers = await RedisAsync.GetUnionFromSetsAsync(new[] { set1Name, set2Name }); + + Assert.That(unionMembers, Is.EquivalentTo( + new List { "one", "two", "three", "four", "five", "six", "seven" })); + } + + [Test] + public async Task Can_Store_UnionBetweenSets() + { + string set1Name = PrefixedKey("testunionset1"); + string set2Name = PrefixedKey("testunionset2"); + string storeSetName = PrefixedKey("testunionsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + + await RedisAsync.StoreUnionFromSetsAsync(storeSetName, new[] { set1Name, set2Name }); + + var unionMembers = await RedisAsync.GetAllItemsFromSetAsync(storeSetName); + + Assert.That(unionMembers, Is.EquivalentTo( + new List { "one", "two", "three", "four", "five", "six", "seven" })); + } + + [Test] + public async Task Can_DiffBetweenSets() + { + string set1Name = PrefixedKey("testdiffset1"); + string set2Name = PrefixedKey("testdiffset2"); + string set3Name = PrefixedKey("testdiffset3"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + var set3Members = new List { "one", "five", "seven", "eleven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + await set3Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set3Name, x)); + + var diffMembers = await RedisAsync.GetDifferencesFromSetAsync(set1Name, new[] { set2Name, set3Name }); + + Assert.That(diffMembers, Is.EquivalentTo( + new List { "two", "three" })); + } + + [Test] + public async Task Can_Store_DiffBetweenSets() + { + string set1Name = PrefixedKey("testdiffset1"); + string set2Name = PrefixedKey("testdiffset2"); + string 
set3Name = PrefixedKey("testdiffset3"); + string storeSetName = PrefixedKey("testdiffsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + var set3Members = new List { "one", "five", "seven", "eleven" }; + + await set1Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set1Name, x)); + await set2Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set2Name, x)); + await set3Members.ForEachAsync(x => RedisAsync.AddItemToSetAsync(set3Name, x)); + + await RedisAsync.StoreDifferencesFromSetAsync(storeSetName, set1Name, new[] { set2Name, set3Name }); + + var diffMembers = await RedisAsync.GetAllItemsFromSetAsync(storeSetName); + + Assert.That(diffMembers, Is.EquivalentTo( + new List { "two", "three" })); + } + + [Test] + public async Task Can_GetRandomEntryFromSet() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var randomEntry = await RedisAsync.GetRandomItemFromSetAsync(SetId); + + Assert.That(storeMembers.Contains(randomEntry), Is.True); + } + + + [Test] + public async Task Can_enumerate_small_ICollection_Set() + { + await storeMembers.ForEachAsync(x => RedisAsync.AddItemToSetAsync(SetId, x)); + + var members = new List(); + await foreach (var item in RedisAsync.Sets[SetId]) + { + members.Add(item); + } + members.Sort(); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_enumerate_large_ICollection_Set() + { + if (TestConfig.IgnoreLongTests) return; + + const int setSize = 2500; + + storeMembers = new List(); + await setSize.TimesAsync(async x => + { + await RedisAsync.AddItemToSetAsync(SetId, x.ToString()); + storeMembers.Add(x.ToString()); + }); + + var members = new List(); + await foreach (var item in RedisAsync.Sets[SetId]) + { + members.Add(item); + } + members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Add_to_ICollection_Set() + { + var list = RedisAsync.Sets[SetId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + var members = await list.ToListAsync(); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Clear_ICollection_Set() + { + var list = RedisAsync.Sets[SetId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + Assert.That(await list.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await list.ClearAsync(); + + Assert.That(await list.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_ICollection_Set() + { + var list = RedisAsync.Sets[SetId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + Assert.That(await list.ContainsAsync("two"), Is.True); + Assert.That(await list.ContainsAsync("five"), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_ICollection_Set() + { + var list = RedisAsync.Sets[SetId]; + await storeMembers.ForEachAsync(x => list.AddAsync(x)); + + storeMembers.Remove("two"); + await list.RemoveAsync("two"); + + var members = await list.ToListAsync(); + + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.Async.cs new file mode 100644 index 
00000000..ae31d2d3 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.Async.cs @@ -0,0 +1,454 @@ +using NUnit.Framework; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Globalization; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisClientSortedSetTestsAsync + : RedisClientTestsBaseAsync + { + private const string SetIdSuffix = "testzset"; + private List storeMembers; + + private string SetId + { + get + { + return PrefixedKey(SetIdSuffix); + } + } + + Dictionary stringDoubleMap; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = "RedisClientSortedSetTests"; + storeMembers = new List { "one", "two", "three", "four" }; + + stringDoubleMap = new Dictionary { + {"one",1}, {"two",2}, {"three",3}, {"four",4} + }; + } + + [Test] + public async Task Can_AddItemToSortedSet_and_GetAllFromSet() + { + var i = 0; + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, i++)); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + Assert.That(members.EquivalentTo(storeMembers), Is.True); + } + + [Test] + public async Task Can_AddRangeToSortedSet_and_GetAllFromSet() + { + var success = await RedisAsync.AddRangeToSortedSetAsync(SetId, storeMembers, 1); + Assert.That(success, Is.True); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task AddToSet_without_score_adds_an_implicit_lexical_order_score() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + Assert.That(members.EquivalentTo(storeMembers), Is.True); + } + + [Test] + public async Task AddToSet_with_same_score_is_still_returned_in_lexical_order_score() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, 1)); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + Assert.That(members.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_RemoveFromSet() + { + const string removeMember = "two"; + + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + await RedisAsync.RemoveItemFromSortedSetAsync(SetId, removeMember); + + storeMembers.Remove(removeMember); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_RemoveItemsFromSortedSet() + { + var removeMembers = new[] { "two" , "four", "six" }; + + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + var removeCount = await RedisAsync.RemoveItemsFromSortedSetAsync(SetId, removeMembers.ToList()); + Assert.That(removeCount, Is.EqualTo(2)); + + removeMembers.Each(x => storeMembers.Remove(x)); + + var members = await RedisAsync.GetAllItemsFromSortedSetAsync(SetId); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_PopFromSet() + { + var i = 0; + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, i++)); + + var 
member = await RedisAsync.PopItemWithHighestScoreFromSortedSetAsync(SetId); + + Assert.That(member, Is.EqualTo("four")); + } + + [Test] + public async Task Can_GetSetCount() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + var setCount = await RedisAsync.GetSortedSetCountAsync(SetId); + + Assert.That(setCount, Is.EqualTo(storeMembers.Count)); + } + + [Test] + public async Task Can_GetSetCountByScores() + { + var scores = new List(); + + await storeMembers.ForEachAsync(async x => + { + await RedisAsync.AddItemToSortedSetAsync(SetId, x); + scores.Add(RedisClient.GetLexicalScore(x)); + }); + + Assert.That(await RedisAsync.GetSortedSetCountAsync(SetId, scores.Min(), scores.Max()), Is.EqualTo(storeMembers.Count())); + Assert.That(await RedisAsync.GetSortedSetCountAsync(SetId, scores.Min(), scores.Min()), Is.EqualTo(1)); + } + + [Test] + public async Task Does_SortedSetContainsValue() + { + const string existingMember = "two"; + const string nonExistingMember = "five"; + + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + Assert.That(await RedisAsync.SortedSetContainsItemAsync(SetId, existingMember), Is.True); + Assert.That(await RedisAsync.SortedSetContainsItemAsync(SetId, nonExistingMember), Is.False); + } + + [Test] + public async Task Can_GetItemIndexInSortedSet_in_Asc_and_Desc() + { + var i = 10; + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, i++)); + + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "one"), Is.EqualTo(0)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "two"), Is.EqualTo(1)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "three"), Is.EqualTo(2)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "four"), Is.EqualTo(3)); + + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "one"), Is.EqualTo(3)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "two"), Is.EqualTo(2)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "three"), Is.EqualTo(1)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "four"), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Store_IntersectBetweenSets() + { + string set1Name = PrefixedKey("testintersectset1"); + string set2Name = PrefixedKey("testintersectset2"); + string storeSetName = PrefixedKey("testintersectsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(set1Name, x)); + await set2Members.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(set2Name, x)); + + await RedisAsync.StoreIntersectFromSortedSetsAsync(storeSetName, new[] { set1Name, set2Name }); + + var intersectingMembers = await RedisAsync.GetAllItemsFromSortedSetAsync(storeSetName); + + Assert.That(intersectingMembers, Is.EquivalentTo(new List { "four", "five" })); + } + + [Test] + public async Task Can_Store_UnionBetweenSets() + { + string set1Name = PrefixedKey("testunionset1"); + string set2Name = PrefixedKey("testunionset2"); + string storeSetName = PrefixedKey("testunionsetstore"); + var set1Members = new List { "one", "two", "three", "four", "five" }; + var set2Members = new List { "four", "five", "six", "seven" }; + + await set1Members.ForEachAsync(async x => await 
RedisAsync.AddItemToSortedSetAsync(set1Name, x)); + await set2Members.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(set2Name, x)); + + await RedisAsync.StoreUnionFromSortedSetsAsync(storeSetName, new[] { set1Name, set2Name }); + + var unionMembers = await RedisAsync.GetAllItemsFromSortedSetAsync(storeSetName); + + Assert.That(unionMembers, Is.EquivalentTo( + new List { "one", "two", "three", "four", "five", "six", "seven" })); + } + + [Test] + public async Task Can_pop_items_with_lowest_and_highest_scores_from_sorted_set() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + + var lowestScore = await RedisAsync.PopItemWithLowestScoreFromSortedSetAsync(SetId); + Assert.That(lowestScore, Is.EqualTo(storeMembers.First())); + + var highestScore = await RedisAsync.PopItemWithHighestScoreFromSortedSetAsync(SetId); + Assert.That(highestScore, Is.EqualTo(storeMembers[storeMembers.Count - 1])); + } + + [Test, Ignore("seems unstable?")] + public async Task Can_GetRangeFromSortedSetByLowestScore_from_sorted_set() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + storeMembers.Sort((x, y) => x.CompareTo(y)); + var memberRage = storeMembers.Where(x => + x.CompareTo("four") >= 0 && x.CompareTo("three") <= 0).ToList(); + + var range = await RedisAsync.GetRangeFromSortedSetByLowestScoreAsync(SetId, "four", "three"); + Assert.That(range.EquivalentTo(memberRage)); + } + + [Test] + public async Task Can_IncrementItemInSortedSet() + { + await stringDoubleMap.ForEachAsync(async (k,v) => await RedisAsync.AddItemToSortedSetAsync(SetId, k, v)); + + var currentScore = await RedisAsync.IncrementItemInSortedSetAsync(SetId, "one", 3); + stringDoubleMap["one"] = stringDoubleMap["one"] + 3; + Assert.That(currentScore, Is.EqualTo(stringDoubleMap["one"])); + + currentScore = await RedisAsync.IncrementItemInSortedSetAsync(SetId, "four", -3); + stringDoubleMap["four"] = stringDoubleMap["four"] - 3; + Assert.That(currentScore, Is.EqualTo(stringDoubleMap["four"])); + + var map = await RedisAsync.GetAllWithScoresFromSortedSetAsync(SetId); + + Assert.That(stringDoubleMap.UnorderedEquivalentTo(map)); + } + + [Test] + public async Task Can_WorkInSortedSetUnderDifferentCulture() + { +#if NETCORE + var prevCulture = CultureInfo.CurrentCulture; + CultureInfo.CurrentCulture = new CultureInfo("ru-RU"); +#else + var prevCulture = Thread.CurrentThread.CurrentCulture; + Thread.CurrentThread.CurrentCulture = CultureInfo.CreateSpecificCulture("ru-RU"); +#endif + await RedisAsync.AddItemToSortedSetAsync(SetId, "key", 123.22); + + var map = await RedisAsync.GetAllWithScoresFromSortedSetAsync(SetId); + + Assert.AreEqual(123.22, map["key"]); + +#if NETCORE + CultureInfo.CurrentCulture = prevCulture; +#else + Thread.CurrentThread.CurrentCulture = prevCulture; +#endif + } + + + [Ignore("Not implemented yet")] + [Test] + public async Task Can_GetRangeFromSortedSetByHighestScore_from_sorted_set() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + storeMembers.Sort((x, y) => y.CompareTo(x)); + var memberRage = storeMembers.Where(x => + x.CompareTo("four") >= 0 && x.CompareTo("three") <= 0).ToList(); + + var range = await RedisAsync.GetRangeFromSortedSetByHighestScoreAsync(SetId, "four", "three"); + Assert.That(range.EquivalentTo(memberRage)); + } + + [Test] + public async Task Can_get_index_and_score_from_SortedSet() + { + 
storeMembers = new List { "a", "b", "c", "d" }; + const double initialScore = 10d; + var i = initialScore; + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x, i++)); + + Assert.That(await RedisAsync.GetItemIndexInSortedSetAsync(SetId, "a"), Is.EqualTo(0)); + Assert.That(await RedisAsync.GetItemIndexInSortedSetDescAsync(SetId, "a"), Is.EqualTo(storeMembers.Count - 1)); + + Assert.That(await RedisAsync.GetItemScoreInSortedSetAsync(SetId, "a"), Is.EqualTo(initialScore)); + Assert.That(await RedisAsync.GetItemScoreInSortedSetAsync(SetId, "d"), Is.EqualTo(initialScore + storeMembers.Count - 1)); + } + + [Test] + public async Task Can_enumerate_small_ICollection_Set() + { + await storeMembers.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(SetId, x)); + + var members = new List(); + await foreach (var item in RedisAsync.SortedSets[SetId]) + { + members.Add(item); + } + members.Sort(); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_enumerate_large_ICollection_Set() + { + if (TestConfig.IgnoreLongTests) return; + + const int setSize = 2500; + + storeMembers = new List(); + await setSize.TimesAsync(async x => + { + await RedisAsync.AddItemToSortedSetAsync(SetId, x.ToString()); + storeMembers.Add(x.ToString()); + }); + + var members = new List(); + await foreach (var item in RedisAsync.SortedSets[SetId]) + { + members.Add(item); + } + members.Sort((x, y) => int.Parse(x).CompareTo(int.Parse(y))); + Assert.That(members.Count, Is.EqualTo(storeMembers.Count)); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Add_to_ICollection_Set() + { + var list = RedisAsync.SortedSets[SetId]; + await storeMembers.ForEachAsync(async x => await list.AddAsync(x)); + + var members = await list.ToListAsync(); + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Can_Clear_ICollection_Set() + { + var list = RedisAsync.SortedSets[SetId]; + await storeMembers.ForEachAsync(async x => await list.AddAsync(x)); + + Assert.That(await list.CountAsync(), Is.EqualTo(storeMembers.Count)); + + await list.ClearAsync(); + + Assert.That(await list.CountAsync(), Is.EqualTo(0)); + } + + [Test] + public async Task Can_Test_Contains_in_ICollection_Set() + { + var list = RedisAsync.SortedSets[SetId]; + await storeMembers.ForEachAsync(async x => await list.AddAsync(x)); + + Assert.That(await list.ContainsAsync("two"), Is.True); + Assert.That(await list.ContainsAsync("five"), Is.False); + } + + [Test] + public async Task Can_Remove_value_from_ICollection_Set() + { + var list = RedisAsync.SortedSets[SetId]; + await storeMembers.ForEachAsync(async x => await list.AddAsync(x)); + + storeMembers.Remove("two"); + await list.RemoveAsync("two"); + + var members = await list.ToListAsync(); + + Assert.That(members, Is.EquivalentTo(storeMembers)); + } + + [Test] + public async Task Score_from_non_existent_item_returns_NaN() + { + var score = await RedisAsync.GetItemScoreInSortedSetAsync("nonexistentset", "value"); + + Assert.That(score, Is.EqualTo(Double.NaN)); + } + + [Test] + public async Task Can_add_large_score_to_sortedset() + { + await RedisAsync.AddItemToSortedSetAsync(SetId, "value", 12345678901234567890d); + var score = await RedisAsync.GetItemScoreInSortedSetAsync(SetId, "value"); + + Assert.That(score, Is.EqualTo(12345678901234567890d)); + } + + public class Article + { + public int Id { get; set; 
} + public string Title { get; set; } + public DateTime ModifiedDate { get; set; } + } + + [Test] + public async Task Can_use_SortedIndex_to_store_articles_by_Date() + { + var redisArticles = RedisAsync.As
(); + + var articles = new[] + { + new Article { Id = 1, Title = "Article 1", ModifiedDate = new DateTime(2015, 01, 02) }, + new Article { Id = 2, Title = "Article 2", ModifiedDate = new DateTime(2015, 01, 01) }, + new Article { Id = 3, Title = "Article 3", ModifiedDate = new DateTime(2015, 01, 03) }, + }; + + await redisArticles.StoreAllAsync(articles); + + const string LatestArticlesSet = "urn:Article:modified"; + + foreach (var article in articles) + { + await RedisAsync.AddItemToSortedSetAsync(LatestArticlesSet, article.Id.ToString(), article.ModifiedDate.Ticks); + } + + var articleIds = await RedisAsync.GetAllItemsFromSortedSetDescAsync(LatestArticlesSet); + articleIds.PrintDump(); + + var latestArticles = await redisArticles.GetByIdsAsync(articleIds); + latestArticles.PrintDump(); + } + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.cs index f43dc190..bfcb695d 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientSortedSetTests.cs @@ -229,7 +229,7 @@ public void Can_pop_items_with_lowest_and_highest_scores_from_sorted_set() Assert.That(highestScore, Is.EqualTo(storeMembers[storeMembers.Count - 1])); } - [Test] + [Test, Ignore("seems unstable?")] public void Can_GetRangeFromSortedSetByLowestScore_from_sorted_set() { storeMembers.ForEach(x => Redis.AddItemToSortedSet(SetId, x)); @@ -264,8 +264,10 @@ public void Can_IncrementItemInSortedSet() public void Can_WorkInSortedSetUnderDifferentCulture() { #if NETCORE + var prevCulture = CultureInfo.CurrentCulture; CultureInfo.CurrentCulture = new CultureInfo("ru-RU"); #else + var prevCulture = Thread.CurrentThread.CurrentCulture; Thread.CurrentThread.CurrentCulture = CultureInfo.CreateSpecificCulture("ru-RU"); #endif Redis.AddItemToSortedSet(SetId, "key", 123.22); @@ -273,6 +275,12 @@ public void Can_WorkInSortedSetUnderDifferentCulture() var map = Redis.GetAllWithScoresFromSortedSet(SetId); Assert.AreEqual(123.22, map["key"]); + +#if NETCORE + CultureInfo.CurrentCulture = prevCulture; +#else + Thread.CurrentThread.CurrentCulture = prevCulture; +#endif } diff --git a/tests/ServiceStack.Redis.Tests/RedisClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientTests.Async.cs new file mode 100644 index 00000000..b5b7aad3 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisClientTests.Async.cs @@ -0,0 +1,673 @@ +using NUnit.Framework; +using ServiceStack.Redis.Support.Locking; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisClientTestsAsync + : RedisClientTestsBaseAsync + { + const string Value = "Value"; + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = nameof(RedisClientTestsAsync); + } + + [Test] + public async Task Can_Set_and_Get_string() + { + await RedisAsync.SetValueAsync("key", Value); + var valueBytes = await NativeAsync.GetAsync("key"); + var valueString = GetString(valueBytes); + await RedisAsync.RemoveAsync("key"); + + Assert.That(valueString, Is.EqualTo(Value)); + } + + [Test] + public async Task Can_Set_and_Get_key_with_space() + { + await RedisAsync.SetValueAsync("key with space", Value); + var valueBytes = await NativeAsync.GetAsync("key with space"); + var valueString = 
GetString(valueBytes); + await RedisAsync.RemoveAsync("key with space"); + + Assert.That(valueString, Is.EqualTo(Value)); + } + + [Test] + public async Task Can_Set_and_Get_key_with_spaces() + { + const string key = "key with spaces"; + + await RedisAsync.SetValueAsync(key, Value); + var valueBytes = await NativeAsync.GetAsync(key); + var valueString = GetString(valueBytes); + + Assert.That(valueString, Is.EqualTo(Value)); + } + + [Test] + public async Task Can_Set_and_Get_key_with_all_byte_values() + { + const string key = "bytesKey"; + + var value = new byte[256]; + for (var i = 0; i < value.Length; i++) + { + value[i] = (byte)i; + } + + await RedisAsync.SetAsync(key, value); + var resultValue = await NativeAsync.GetAsync(key); + + Assert.That(resultValue, Is.EquivalentTo(value)); + } + + [Test] + public async Task GetKeys_returns_matching_collection() + { + await RedisAsync.SetAsync("ss-tests:a1", "One"); + await RedisAsync.SetAsync("ss-tests:a2", "One"); + await RedisAsync.SetAsync("ss-tests:b3", "One"); + + var matchingKeys = await RedisAsync.SearchKeysAsync("ss-tests:a*"); + + Assert.That(matchingKeys.Count, Is.EqualTo(2)); + } + + [Test] + public async Task GetKeys_on_non_existent_keys_returns_empty_collection() + { + var matchingKeys = await RedisAsync.SearchKeysAsync("ss-tests:NOTEXISTS"); + + Assert.That(matchingKeys.Count, Is.EqualTo(0)); + } + + [Test] + public async Task Can_get_Types() + { + await RedisAsync.SetValueAsync("string", "string"); + await RedisAsync.AddItemToListAsync("list", "list"); + await RedisAsync.AddItemToSetAsync("set", "set"); + await RedisAsync.AddItemToSortedSetAsync("sortedset", "sortedset"); + await RedisAsync.SetEntryInHashAsync("hash", "key", "val"); + + Assert.That(await RedisAsync.GetEntryTypeAsync("nokey"), Is.EqualTo(RedisKeyType.None)); + Assert.That(await RedisAsync.GetEntryTypeAsync("string"), Is.EqualTo(RedisKeyType.String)); + Assert.That(await RedisAsync.GetEntryTypeAsync("list"), Is.EqualTo(RedisKeyType.List)); + Assert.That(await RedisAsync.GetEntryTypeAsync("set"), Is.EqualTo(RedisKeyType.Set)); + Assert.That(await RedisAsync.GetEntryTypeAsync("sortedset"), Is.EqualTo(RedisKeyType.SortedSet)); + Assert.That(await RedisAsync.GetEntryTypeAsync("hash"), Is.EqualTo(RedisKeyType.Hash)); + } + + [Test] + public async Task Can_delete_keys() + { + await RedisAsync.SetValueAsync("key", "val"); + + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + + await RedisAsync.RemoveAsync("key"); + + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + + var keysMap = new Dictionary(); + + 10.Times(i => keysMap.Add("key" + i, "val" + i)); + + await RedisAsync.SetAllAsync(keysMap); + + for (int i = 0; i < 10; i++) + Assert.That(await RedisAsync.ContainsKeyAsync("key" + i), Is.True); + + await RedisAsync.RemoveEntryAsync(keysMap.Keys.ToArray()); + + for (int i = 0; i < 10; i++) + Assert.That(await RedisAsync.ContainsKeyAsync("key" + i), Is.False); + } + + [Test] + public async Task Can_get_RandomKey() + { + await RedisAsync.SelectAsync(15); + var keysMap = new Dictionary(); + + 10.Times(i => keysMap.Add(RedisRaw.NamespacePrefix + "key" + i, "val" + i)); + + await RedisAsync.SetAllAsync(keysMap); + + var randKey = await RedisAsync.GetRandomKeyAsync(); + + Assert.That(keysMap.ContainsKey(randKey), Is.True); + } + + [Test] + public async Task Can_RenameKey() + { + await RedisAsync.SetValueAsync("oldkey", "val"); + await NativeAsync.RenameAsync("oldkey", "newkey"); + + Assert.That(await RedisAsync.ContainsKeyAsync("oldkey"), 
Is.False); + Assert.That(await RedisAsync.ContainsKeyAsync("newkey"), Is.True); + } + + [Test] + public async Task Can_Expire() + { + await RedisAsync.SetValueAsync("key", "val"); + await RedisAsync.ExpireEntryInAsync("key", TimeSpan.FromSeconds(1)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_Expire_Ms() + { + await RedisAsync.SetValueAsync("key", "val"); + await RedisAsync.ExpireEntryInAsync("key", TimeSpan.FromMilliseconds(100)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(500); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Ignore("Changes in system clock can break test")] + [Test] + public async Task Can_ExpireAt() + { + await RedisAsync.SetValueAsync("key", "val"); + + var unixNow = DateTime.Now.ToUnixTime(); + var in2Secs = unixNow + 2; + + await NativeAsync.ExpireAtAsync("key", in2Secs); + + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(3000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_GetTimeToLive() + { + await RedisAsync.SetValueAsync("key", "val"); + await RedisAsync.ExpireEntryInAsync("key", TimeSpan.FromSeconds(10)); + + var ttl = await RedisAsync.GetTimeToLiveAsync("key"); + Assert.That(ttl.Value.TotalSeconds, Is.GreaterThanOrEqualTo(9)); + await Task.Delay(1700); + + ttl = await RedisAsync.GetTimeToLiveAsync("key"); + Assert.That(ttl.Value.TotalSeconds, Is.LessThanOrEqualTo(9)); + } + + [Test] + public async Task Can_GetServerTime() + { + var now = await RedisAsync.GetServerTimeAsync(); + + now.Kind.PrintDump(); + now.ToString("D").Print(); + now.ToString("T").Print(); + + "UtcNow".Print(); + DateTime.UtcNow.ToString("D").Print(); + DateTime.UtcNow.ToString("T").Print(); + + Assert.That(now.Date, Is.EqualTo(DateTime.UtcNow.Date)); + } + + [Test] + public async Task Can_Ping() + { + Assert.That(await RedisAsync.PingAsync(), Is.True); + } + + [Test] + public async Task Can_Echo() + { + Assert.That(await RedisAsync.EchoAsync("Hello"), Is.EqualTo("Hello")); + } + + [Test] + public async Task Can_SlaveOfNoOne() + { + await NativeAsync.SlaveOfNoOneAsync(); + } + + [Test] + public async Task Can_Save() + { + try + { + await NativeAsync.SaveAsync(); + } + catch (RedisResponseException e) + { + // if exception has that message then it still proves that BgSave works as expected. + if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress") + || e.Message.StartsWith("An AOF log rewriting in progress: can't BGSAVE right now") + || e.Message.StartsWith("Background save already in progress") + || e.Message.StartsWith("Another child process is active (AOF?): can't BGSAVE right now")) + return; + + throw; + } + } + + [Test] + public async Task Can_BgSave() + { + try + { + await NativeAsync.BgSaveAsync(); + } + catch (RedisResponseException e) + { + // if exception has that message then it still proves that BgSave works as expected. 
+ if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress") + || e.Message.StartsWith("An AOF log rewriting in progress: can't BGSAVE right now") + || e.Message.StartsWith("Background save already in progress") + || e.Message.StartsWith("Another child process is active (AOF?): can't BGSAVE right now")) + return; + + throw; + } + } + + [Test] + public async Task Can_Quit() + { + await NativeAsync.QuitAsync(); + RedisRaw.NamespacePrefix = null; + CleanMask = null; + } + + [Test] + public async Task Can_BgRewriteAof() + { + await NativeAsync.BgRewriteAofAsync(); + } + + [Test] + [Ignore("Works too well and shutdown the server")] + public async Task Can_Shutdown() + { + await RedisAsync.ShutdownAsync(); + } + + [Test] + public async Task Can_get_Keys_with_pattern() + { + for (int i = 0; i < 5; i++) + await RedisAsync.SetValueAsync("k1:" + i, "val"); + for (int i = 0; i < 5; i++) + await RedisAsync.SetValueAsync("k2:" + i, "val"); + + var keys = await NativeAsync.KeysAsync("k1:*"); + Assert.That(keys.Length, Is.EqualTo(5)); + + var scanKeys = await RedisAsync.SearchKeysAsync("k1:*"); + Assert.That(scanKeys.Count, Is.EqualTo(5)); + } + + [Test] + public async Task Can_GetAll() + { + var keysMap = new Dictionary(); + + 10.Times(i => keysMap.Add("key" + i, "val" + i)); + + await RedisAsync.SetAllAsync(keysMap); + + var map = await RedisAsync.GetAllAsync(keysMap.Keys); + var mapKeys = await RedisAsync.GetValuesAsync(keysMap.Keys.ToList()); + + foreach (var entry in keysMap) + { + Assert.That(map.ContainsKey(entry.Key), Is.True); + Assert.That(mapKeys.Contains(entry.Value), Is.True); + } + } + + [Test] + public async Task Can_GetValues_JSON_strings() + { + var val = "{\"AuthorId\":0,\"Created\":\"\\/Date(1345961754013)\\/\",\"Name\":\"test\",\"Base64\":\"BQELAAEBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP8BWAFYAViA/wFYAVgBWID/AVgBWAFYgP8BWAFYAViA/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWID/gP+A/wFYgP+A/4D/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/AViA/4D/AVgBWAFYgP8BWAFYAViA/4D/AViA/4D/gP+A/4D/gP8BWAFYgP+A/wFYgP+A/wFYgP+A/4D/gP+A/wFYgP+A/wFYgP+A/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWAFYAViA/wFYAVgBWID/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"}"; + + await RedisAsync.SetValueAsync("UserLevel/1", val); + + var vals = await RedisAsync.GetValuesAsync(new List(new[] { "UserLevel/1" })); + + Assert.That(vals.Count, Is.EqualTo(1)); + Assert.That(vals[0], Is.EqualTo(val)); + } + + [Test] + public async Task Can_AcquireLock() + { + // guid here is to prevent competition between concurrent runtime tests + var key = PrefixedKey("AcquireLockKeyTimeOut:" + Guid.NewGuid()); + var lockKey = PrefixedKey("Can_AcquireLock_TimeOut:" + Guid.NewGuid()); + await RedisAsync.IncrementValueAsync(key); //1 + + Task[] tasks = new Task[5]; + for (int i = 0; i < tasks.Length; i++) + { + var snapsot = i; + tasks[snapsot] = Task.Run( 
+ () => IncrementKeyInsideLock(key, lockKey, snapsot, new RedisClient(TestConfig.SingleHost) { NamespacePrefix = RedisRaw.NamespacePrefix }.ForAsyncOnly()) + ); + } + await Task.WhenAll(tasks); + + var val = await RedisAsync.GetAsync(key); + + Assert.That(val, Is.EqualTo(1 + 5)); + } + + private async Task IncrementKeyInsideLock(String key, String lockKey, int clientNo, IRedisClientAsync client) + { + await using (await client.AcquireLockAsync(lockKey)) + { + Debug.WriteLine(String.Format("client {0} acquired lock", clientNo)); + var val = await client.GetAsync(key); + + await Task.Delay(200); + + await client.SetAsync(key, val + 1); + Debug.WriteLine(String.Format("client {0} released lock", clientNo)); + } + } + + [Test] + public async Task Can_AcquireLock_TimeOut() + { + // guid here is to prevent competition between concurrent runtime tests + var key = PrefixedKey("AcquireLockKeyTimeOut:" + Guid.NewGuid()); + var lockKey = PrefixedKey("Can_AcquireLock_TimeOut:" + Guid.NewGuid()); + await RedisAsync.IncrementValueAsync(key); //1 + _ = await RedisAsync.AcquireLockAsync(lockKey); + var waitFor = TimeSpan.FromMilliseconds(1000); + var now = DateTime.Now; + + try + { + await using var client = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + await using (await client.AcquireLockAsync(lockKey, waitFor)) + { + await client.IncrementValueAsync(key); //2 + } + } + catch (TimeoutException) + { + var val = await RedisAsync.GetAsync(key); + Assert.That(val, Is.EqualTo(1)); + + var timeTaken = DateTime.Now - now; + Assert.That(timeTaken.TotalMilliseconds > waitFor.TotalMilliseconds, Is.True); + Assert.That(timeTaken.TotalMilliseconds < waitFor.TotalMilliseconds + 1000, Is.True); + return; + } + finally + { + await RedisAsync.RemoveAsync(key); + await RedisAsync.RemoveAsync(lockKey); + } + Assert.Fail("should have Timed out"); + } + + [Test] + public async Task Can_Append() + { + const string expectedString = "Hello, " + "World!"; + await RedisAsync.SetValueAsync("key", "Hello, "); + var currentLength = await RedisAsync.AppendToValueAsync("key", "World!"); + + Assert.That(currentLength, Is.EqualTo(expectedString.Length)); + + var val = await RedisAsync.GetValueAsync("key"); + Assert.That(val, Is.EqualTo(expectedString)); + } + + [Test] + public async Task Can_GetRange() + { + const string helloWorld = "Hello, World!"; + await RedisAsync.SetValueAsync("key", helloWorld); + + var fromIndex = "Hello, ".Length; + var toIndex = "Hello, World".Length - 1; + + var expectedString = helloWorld.Substring(fromIndex, toIndex - fromIndex + 1); + var world = await NativeAsync.GetRangeAsync("key", fromIndex, toIndex); + + Assert.That(world.Length, Is.EqualTo(expectedString.Length)); + } + + [Test] + public async Task Can_create_distributed_lock() + { + var key = "lockkey"; + int lockTimeout = 2; + + var distributedLock = new DistributedLock().AsAsync(); + + var state = await distributedLock.LockAsync(key, lockTimeout, lockTimeout, RedisAsync); + Assert.AreEqual(state.Result, DistributedLock.LOCK_ACQUIRED); + + //can't re-lock + distributedLock = new DistributedLock(); + state = await distributedLock.LockAsync(key, lockTimeout, lockTimeout, RedisAsync); + Assert.AreEqual(state.Result, DistributedLock.LOCK_NOT_ACQUIRED); + + // re-acquire lock after timeout + await Task.Delay(lockTimeout * 1000 + 1000); + distributedLock = new DistributedLock(); + state = await distributedLock.LockAsync(key, lockTimeout, lockTimeout, RedisAsync); + + (var result, var expire) = state; // test decomposition since we are here + 
Assert.AreEqual(result, DistributedLock.LOCK_RECOVERED); + + Assert.IsTrue(await distributedLock.UnlockAsync(key, expire, RedisAsync)); + + //can now lock + distributedLock = new DistributedLock(); + state = await distributedLock.LockAsync(key, lockTimeout, lockTimeout, RedisAsync); + Assert.AreEqual(state.Result, DistributedLock.LOCK_ACQUIRED); + + //cleanup + Assert.IsTrue(await distributedLock.UnlockAsync(key, state.Expiration, RedisAsync)); + } + + public class MyPoco + { + public int Id { get; set; } + public string Name { get; set; } + } + + [Test] + public async Task Can_StoreObject() + { + object poco = new MyPoco { Id = 1, Name = "Test" }; + + await RedisAsync.StoreObjectAsync(poco); + + Assert.That(await RedisAsync.GetValueAsync(RedisRaw.NamespacePrefix + "urn:mypoco:1"), Is.EqualTo("{\"Id\":1,\"Name\":\"Test\"}")); + + Assert.That(await RedisAsync.PopItemFromSetAsync(RedisRaw.NamespacePrefix + "ids:MyPoco"), Is.EqualTo("1")); + } + + [Test] + public async Task Can_store_multiple_keys() + { + var keys = 5.Times(x => "key" + x); + var vals = 5.Times(x => "val" + x); + + using var redis = RedisClient.New(); + await RedisAsync.SetAllAsync(keys, vals); + + var all = await RedisAsync.GetValuesAsync(keys); + Assert.AreEqual(vals, all); + } + + [Test] + public async Task Can_store_Dictionary() + { + var keys = 5.Times(x => "key" + x); + var vals = 5.Times(x => "val" + x); + var map = new Dictionary(); + keys.ForEach(x => map[x] = "val" + x); + + await using var client = RedisClient.New().ForAsyncOnly(); + await client.SetAllAsync(map); + + var all = await client.GetValuesMapAsync(keys); + Assert.AreEqual(map, all); + } + + [Test] + public async Task Can_store_Dictionary_as_objects() + { + var map = new Dictionary + { + ["key_a"] = "123", + ["key_b"] = null + }; + + await using var client = RedisClient.New().ForAsyncOnly(); + + await client.SetAllAsync(map); + + Assert.That(await client.GetAsync("key_a"), Is.EqualTo("123")); + Assert.That(await client.GetValueAsync("key_b"), Is.EqualTo("")); + } + + + [Test] + public async Task Can_store_Dictionary_as_bytes() + { + var map = new Dictionary + { + ["key_a"] = "123".ToUtf8Bytes(), + ["key_b"] = null + }; + + await using var client = RedisClient.New().ForAsyncOnly(); + + await client.SetAllAsync(map); + + Assert.That(await client.GetAsync("key_a"), Is.EqualTo("123")); + Assert.That(await client.GetValueAsync("key_b"), Is.EqualTo("")); + } + + [Test] + public async Task Should_reset_slowlog() + { + await using var client = RedisClient.New().ForAsyncOnly(); + await client.SlowlogResetAsync(); + } + + [Test] + public async Task Can_get_slowlog() + { + await using var client = RedisClient.New().ForAsyncOnly(); + + var log = await client.GetSlowlogAsync(10); + + foreach (var t in log) + { + Console.WriteLine(t.Id); + Console.WriteLine(t.Duration); + Console.WriteLine(t.Timestamp); + Console.WriteLine(string.Join(":", t.Arguments)); + } + } + + + [Test] + public async Task Can_change_db_at_runtime() + { + await using var redis = new RedisClient(TestConfig.SingleHost, TestConfig.RedisPort, db: 1).ForAsyncOnly(); + + var val = Environment.TickCount; + var key = "test" + val; + try + { + await redis.SetAsync(key, val); + await redis.SelectAsync(2); + Assert.That(await redis.GetAsync(key), Is.EqualTo(0)); + await redis.SelectAsync(1); + Assert.That(await redis.GetAsync(key), Is.EqualTo(val)); + await redis.DisposeAsync(); + } + finally + { + await redis.SelectAsync(1); + await redis.RemoveAsync(key); + } + } + + [Test] + public async Task 
Can_Set_Expire_Seconds() + { + await RedisAsync.SetValueAsync("key", "val", expireIn: TimeSpan.FromSeconds(1)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_Set_Expire_MilliSeconds() + { + await RedisAsync.SetValueAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_Set_Expire_Seconds_if_exists() + { + Assert.That(await RedisAsync.SetValueIfExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1500)), + Is.False); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + + await RedisAsync.SetValueAsync("key", "val"); + Assert.That(await RedisAsync.SetValueIfExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Is.True); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + } + + [Test] + public async Task Can_Set_Expire_Seconds_if_not_exists() + { + Assert.That(await RedisAsync.SetValueIfNotExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Is.True); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + + Assert.That(await RedisAsync.SetValueIfNotExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Is.False); + + await Task.Delay(2000); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.False); + + await RedisAsync.RemoveAsync("key"); + await RedisAsync.SetValueIfNotExistsAsync("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); + Assert.That(await RedisAsync.ContainsKeyAsync("key"), Is.True); + } + } + +} diff --git a/tests/ServiceStack.Redis.Tests/RedisClientTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientTests.cs index d31bd930..19bfbfe5 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientTests.cs @@ -26,7 +26,7 @@ public override void OnBeforeEachTest() [Test] public void Can_Set_and_Get_string() { - Redis.SetEntry("key", Value); + Redis.SetValue("key", Value); var valueBytes = Redis.Get("key"); var valueString = GetString(valueBytes); Redis.Remove("key"); @@ -37,7 +37,7 @@ public void Can_Set_and_Get_string() [Test] public void Can_Set_and_Get_key_with_space() { - Redis.SetEntry("key with space", Value); + Redis.SetValue("key with space", Value); var valueBytes = Redis.Get("key with space"); var valueString = GetString(valueBytes); Redis.Remove("key with space"); @@ -50,7 +50,7 @@ public void Can_Set_and_Get_key_with_spaces() { const string key = "key with spaces"; - Redis.SetEntry(key, Value); + Redis.SetValue(key, Value); var valueBytes = Redis.Get(key); var valueString = GetString(valueBytes); @@ -97,7 +97,7 @@ public void GetKeys_on_non_existent_keys_returns_empty_collection() [Test] public void Can_get_Types() { - Redis.SetEntry("string", "string"); + Redis.SetValue("string", "string"); Redis.AddItemToList("list", "list"); Redis.AddItemToSet("set", "set"); Redis.AddItemToSortedSet("sortedset", "sortedset"); @@ -114,7 +114,7 @@ public void Can_get_Types() [Test] public void Can_delete_keys() { - Redis.SetEntry("key", "val"); + Redis.SetValue("key", "val"); Assert.That(Redis.ContainsKey("key"), Is.True); @@ -153,7 +153,7 @@ public void 
Can_get_RandomKey() [Test] public void Can_RenameKey() { - Redis.SetEntry("oldkey", "val"); + Redis.SetValue("oldkey", "val"); Redis.Rename("oldkey", "newkey"); Assert.That(Redis.ContainsKey("oldkey"), Is.False); @@ -163,7 +163,7 @@ public void Can_RenameKey() [Test] public void Can_Expire() { - Redis.SetEntry("key", "val"); + Redis.SetValue("key", "val"); Redis.Expire("key", 1); Assert.That(Redis.ContainsKey("key"), Is.True); Thread.Sleep(2000); @@ -173,18 +173,18 @@ public void Can_Expire() [Test] public void Can_Expire_Ms() { - Redis.SetEntry("key", "val"); + Redis.SetValue("key", "val"); Redis.ExpireEntryIn("key", TimeSpan.FromMilliseconds(100)); Assert.That(Redis.ContainsKey("key"), Is.True); Thread.Sleep(500); Assert.That(Redis.ContainsKey("key"), Is.False); } - [Explicit("Changes in system clock can break test")] + [Ignore("Changes in system clock can break test")] [Test] public void Can_ExpireAt() { - Redis.SetEntry("key", "val"); + Redis.SetValue("key", "val"); var unixNow = DateTime.Now.ToUnixTime(); var in2Secs = unixNow + 2; @@ -199,7 +199,7 @@ public void Can_ExpireAt() [Test] public void Can_GetTimeToLive() { - Redis.SetEntry("key", "val"); + Redis.SetValue("key", "val"); Redis.Expire("key", 10); var ttl = Redis.GetTimeToLive("key"); @@ -247,7 +247,21 @@ public void Can_SlaveOfNoOne() [Test] public void Can_Save() { - Redis.Save(); + try + { + Redis.Save(); + } + catch (RedisResponseException e) + { + // if exception has that message then it still proves that BgSave works as expected. + if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress") + || e.Message.StartsWith("An AOF log rewriting in progress: can't BGSAVE right now") + || e.Message.StartsWith("Background save already in progress") + || e.Message.StartsWith("Another child process is active (AOF?): can't BGSAVE right now")) + return; + + throw; + } } [Test] @@ -261,7 +275,9 @@ public void Can_BgSave() { // if exception has that message then it still proves that BgSave works as expected. 
if (e.Message.StartsWith("Can't BGSAVE while AOF log rewriting is in progress") - || e.Message.StartsWith("An AOF log rewriting in progress: can't BGSAVE right now")) + || e.Message.StartsWith("An AOF log rewriting in progress: can't BGSAVE right now") + || e.Message.StartsWith("Background save already in progress") + || e.Message.StartsWith("Another child process is active (AOF?): can't BGSAVE right now")) return; throw; @@ -292,11 +308,14 @@ public void Can_Shutdown() [Test] public void Can_get_Keys_with_pattern() { - 5.Times(i => Redis.SetEntry("k1:" + i, "val")); - 5.Times(i => Redis.SetEntry("k2:" + i, "val")); + 5.Times(i => Redis.SetValue("k1:" + i, "val")); + 5.Times(i => Redis.SetValue("k2:" + i, "val")); var keys = Redis.Keys("k1:*"); Assert.That(keys.Length, Is.EqualTo(5)); + + var scanKeys = Redis.ScanAllKeys("k1:*").ToArray(); + Assert.That(scanKeys.Length, Is.EqualTo(5)); } [Test] @@ -323,7 +342,7 @@ public void Can_GetValues_JSON_strings() { var val = "{\"AuthorId\":0,\"Created\":\"\\/Date(1345961754013)\\/\",\"Name\":\"test\",\"Base64\":\"BQELAAEBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP8BWAFYAViA/wFYAVgBWID/AVgBWAFYgP8BWAFYAViA/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWID/gP+A/wFYgP+A/4D/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/AViA/4D/AVgBWAFYgP8BWAFYAViA/4D/AViA/4D/gP+A/4D/gP8BWAFYgP+A/wFYgP+A/wFYgP+A/4D/gP+A/wFYgP+A/wFYgP+A/4D/gP+A/4D/AVgBWID/gP8BWID/gP8BWAFYAViA/wFYAVgBWID/gP8BWID/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAViA/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP8BWAFYgP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/AVgBWID/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/4D/gP+A/wFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"}"; - Redis.SetEntry("UserLevel/1", val); + Redis.SetValue("UserLevel/1", val); var vals = Redis.GetValues(new List(new[] { "UserLevel/1" })); @@ -334,8 +353,9 @@ public void Can_GetValues_JSON_strings() [Test] public void Can_AcquireLock() { - var key = PrefixedKey("AcquireLockKey"); - var lockKey = PrefixedKey("Can_AcquireLock"); + // guid here is to prevent competition between concurrent runtime tests + var key = PrefixedKey("AcquireLockKeyTimeOut:" + Guid.NewGuid()); + var lockKey = PrefixedKey("Can_AcquireLock_TimeOut:" + Guid.NewGuid()); Redis.IncrementValue(key); //1 var asyncResults = 5.TimesAsync(i => @@ -365,8 +385,9 @@ private void IncrementKeyInsideLock(String key, String lockKey, int clientNo, IR [Test] public void Can_AcquireLock_TimeOut() { - var key = PrefixedKey("AcquireLockKeyTimeOut"); - var lockKey = PrefixedKey("Can_AcquireLock_TimeOut"); + // guid here is to prevent competition between concurrent runtime tests + var key = PrefixedKey("AcquireLockKeyTimeOut:" + Guid.NewGuid()); + var lockKey = PrefixedKey("Can_AcquireLock_TimeOut:" + Guid.NewGuid()); Redis.IncrementValue(key); //1 var acquiredLock = Redis.AcquireLock(lockKey); var waitFor = TimeSpan.FromMilliseconds(1000); @@ -382,7 +403,7 @@ public void Can_AcquireLock_TimeOut() } } } - catch (TimeoutException tex) + catch (TimeoutException) { var val = Redis.Get(key); Assert.That(val, Is.EqualTo(1)); @@ 
-392,6 +413,11 @@ public void Can_AcquireLock_TimeOut() Assert.That(timeTaken.TotalMilliseconds < waitFor.TotalMilliseconds + 1000, Is.True); return; } + finally + { + Redis.Remove(key); + Redis.Remove(lockKey); + } Assert.Fail("should have Timed out"); } @@ -399,7 +425,7 @@ public void Can_AcquireLock_TimeOut() public void Can_Append() { const string expectedString = "Hello, " + "World!"; - Redis.SetEntry("key", "Hello, "); + Redis.SetValue("key", "Hello, "); var currentLength = Redis.AppendToValue("key", "World!"); Assert.That(currentLength, Is.EqualTo(expectedString.Length)); @@ -412,7 +438,7 @@ public void Can_Append() public void Can_GetRange() { const string helloWorld = "Hello, World!"; - Redis.SetEntry("key", helloWorld); + Redis.SetValue("key", helloWorld); var fromIndex = "Hello, ".Length; var toIndex = "Hello, World".Length - 1; @@ -547,7 +573,7 @@ public void Should_reset_slowlog() } [Test] - public void Can_get_showlog() + public void Can_get_slowlog() { using (var redis = RedisClient.New()) { @@ -591,7 +617,7 @@ public void Can_change_db_at_runtime() [Test] public void Can_Set_Expire_Seconds() { - Redis.SetEntry("key", "val", expireIn: TimeSpan.FromSeconds(1)); + Redis.SetValue("key", "val", expireIn: TimeSpan.FromSeconds(1)); Assert.That(Redis.ContainsKey("key"), Is.True); Thread.Sleep(2000); Assert.That(Redis.ContainsKey("key"), Is.False); @@ -600,7 +626,7 @@ public void Can_Set_Expire_Seconds() [Test] public void Can_Set_Expire_MilliSeconds() { - Redis.SetEntry("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); + Redis.SetValue("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); Assert.That(Redis.ContainsKey("key"), Is.True); Thread.Sleep(2000); Assert.That(Redis.ContainsKey("key"), Is.False); @@ -609,12 +635,12 @@ public void Can_Set_Expire_MilliSeconds() [Test] public void Can_Set_Expire_Seconds_if_exists() { - Assert.That(Redis.SetEntryIfExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1500)), + Assert.That(Redis.SetValueIfExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1500)), Is.False); Assert.That(Redis.ContainsKey("key"), Is.False); - Redis.SetEntry("key", "val"); - Assert.That(Redis.SetEntryIfExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Redis.SetValue("key", "val"); + Assert.That(Redis.SetValueIfExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), Is.True); Assert.That(Redis.ContainsKey("key"), Is.True); @@ -625,18 +651,18 @@ public void Can_Set_Expire_Seconds_if_exists() [Test] public void Can_Set_Expire_Seconds_if_not_exists() { - Assert.That(Redis.SetEntryIfNotExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Assert.That(Redis.SetValueIfNotExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), Is.True); Assert.That(Redis.ContainsKey("key"), Is.True); - Assert.That(Redis.SetEntryIfNotExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), + Assert.That(Redis.SetValueIfNotExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)), Is.False); Thread.Sleep(2000); Assert.That(Redis.ContainsKey("key"), Is.False); Redis.Remove("key"); - Redis.SetEntryIfNotExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); + Redis.SetValueIfNotExists("key", "val", expireIn: TimeSpan.FromMilliseconds(1000)); Assert.That(Redis.ContainsKey("key"), Is.True); } } diff --git a/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.Async.cs b/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.Async.cs new file mode 100644 index 00000000..40d56a42 --- /dev/null +++ 
b/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.Async.cs @@ -0,0 +1,126 @@ +using NUnit.Framework; +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + public static class AsyncExtensions + { + public static async ValueTask ForEachAsync(this List source, Func action) + { + foreach (var item in source) + await action(item).ConfigureAwait(false); + } + public static async ValueTask ForEachAsync(this Dictionary source, Func action) + { + foreach (var item in source) + await action(item.Key, item.Value).ConfigureAwait(false); + } + public static async ValueTask TimesAsync(this int times, Func action) + { + for (int i = 0; i < times; i++) + { + await action(i).ConfigureAwait(false); + } + } + public static async ValueTask> ToListAsync(this IAsyncEnumerable source) + { + var list = new List(); + await foreach (var item in source.ConfigureAwait(false)) + list.Add(item); + return list; + } + + public static async ValueTask CountAsync(this IAsyncEnumerable source) + { + int count = 0; + await foreach (var item in source.ConfigureAwait(false)) + count++; + return count; + } + + public static IRedisClientAsync ForAsyncOnly(this RedisClient client) + { +#if DEBUG + if (client is object) client.DebugAllowSync = false; +#endif + return client; + } + + public static async IAsyncEnumerable TakeAsync(this IAsyncEnumerable source, int count) + { + await foreach (var item in source.ConfigureAwait(false)) + { + if (count > 0) + { + count--; + yield return item; + } + } + } + + public static async ValueTask> ToDictionaryAsync(this IAsyncEnumerable source, Func keySelector, Func valueSelector) + { + var result = new Dictionary(); + await foreach (var item in source.ConfigureAwait(false)) + { + result.Add(keySelector(item), valueSelector(item)); + } + return result; + } + } + public class RedisClientTestsBaseAsyncTests // testing the base class features + : RedisClientTestsBaseAsync + { + [Test] + public void DetectUnexpectedSync() + { + #if DEBUG + Assert.False(RedisRaw.DebugAllowSync, nameof(RedisRaw.DebugAllowSync)); + var ex = Assert.Throws(() => RedisRaw.Ping()); + Assert.AreEqual("Unexpected synchronous operation detected from 'SendReceive'", ex.Message); + #endif + } + } + + [Category("Async")] + public abstract class RedisClientTestsBaseAsync : RedisClientTestsBase + { + protected IRedisClientAsync RedisAsync => base.Redis; + protected IRedisNativeClientAsync NativeAsync => base.Redis; + + [Obsolete("This should use RedisAsync or RedisRaw", true)] + protected new RedisClient Redis => base.Redis; + + protected RedisClient RedisRaw + { + get => base.Redis; + set => base.Redis = value; + } + + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + _ = RedisRaw.ForAsyncOnly(); + } + public override void OnAfterEachTest() + { +#if DEBUG + if(RedisRaw is object) RedisRaw.DebugAllowSync = true; +#endif + base.OnAfterEachTest(); + } + + protected static async ValueTask> ToListAsync(IAsyncEnumerable source, CancellationToken token = default) + { + var list = new List(); + await foreach (var value in source.ConfigureAwait(false).WithCancellation(token)) + { + list.Add(value); + } + return list; + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.cs b/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.cs index afa0f780..087c1c4f 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.cs +++ 
b/tests/ServiceStack.Redis.Tests/RedisClientTestsBase.cs @@ -1,8 +1,6 @@ -using System; +using NUnit.Framework; using System.Diagnostics; using System.Text; -using NUnit.Framework; -using ServiceStack.Text; namespace ServiceStack.Redis.Tests { @@ -16,7 +14,7 @@ protected void Log(string fmt, params object[] args) Debug.WriteLine("{0}", string.Format(fmt, args).Trim()); } - [TestFixtureSetUp] + [OneTimeSetUp] public virtual void OnBeforeTestFixture() { RedisClient.NewFactoryFn = () => new RedisClient(TestConfig.SingleHost); @@ -26,7 +24,7 @@ public virtual void OnBeforeTestFixture() } } - [TestFixtureTearDown] + [OneTimeTearDown] public virtual void OnAfterTestFixture() { } diff --git a/tests/ServiceStack.Redis.Tests/RedisClientsManagerExtensionsTests.cs b/tests/ServiceStack.Redis.Tests/RedisClientsManagerExtensionsTests.cs index d2c6ac5a..804681a2 100644 --- a/tests/ServiceStack.Redis.Tests/RedisClientsManagerExtensionsTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisClientsManagerExtensionsTests.cs @@ -34,7 +34,7 @@ public void Can_Exec_Func_string() { string value = redisManager.Exec(r => { - r.SetEntry("key", "value"); + r.SetValue("key", "value"); return r.GetValue("key"); }); Assert.That(value, Is.EqualTo("value")); diff --git a/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.Async.cs new file mode 100644 index 00000000..c6f267c3 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.Async.cs @@ -0,0 +1,286 @@ +using NUnit.Framework; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Async")] + [Ignore("CI requires redis-server v3.2.0")] + public class RedisGeoNativeClientTestsAsync + { + private readonly IRedisNativeClientAsync redis; + + public RedisGeoNativeClientTestsAsync() + { + redis = new RedisNativeClient(TestConfig.GeoHost); + } + + [OneTimeTearDown] + public async Task OneTimeTearDown() + { + await redis.DisposeAsync(); + } + + [Test] + public async Task Can_GeoAdd_and_GeoPos() + { + await redis.FlushDbAsync(); + var count = await redis.GeoAddAsync("Sicily", 13.361389, 38.115556, "Palermo"); + Assert.That(count, Is.EqualTo(1)); + var results = await redis.GeoPosAsync("Sicily", new[] { "Palermo" }); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + } + + [Test] + public async Task GeoPos_on_NonExistingMember_returns_no_results() + { + await redis.FlushDbAsync(); + var count = await redis.GeoAddAsync("Sicily", 13.361389, 38.115556, "Palermo"); + var results = await redis.GeoPosAsync("Sicily", new[] { "NonExistingMember" }); + Assert.That(results.Count, Is.EqualTo(0)); + + results = await redis.GeoPosAsync("Sicily", new[] { "Palermo", "NonExistingMember" }); + Assert.That(results.Count, Is.EqualTo(1)); + } + + [Test] + public async Task Can_GeoAdd_and_GeoPos_multiple() + { + await redis.FlushDbAsync(); + var count = await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + Assert.That(count, Is.EqualTo(2)); + + var results = await redis.GeoPosAsync("Sicily", new[] { "Palermo", "Catania" }); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, 
Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Member, Is.EqualTo("Catania")); + } + + [Test] + public async Task Can_GeoDist() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var distance = await redis.GeoDistAsync("Sicily", "Palermo", "Catania"); + Assert.That(distance, Is.EqualTo(166274.15156960039).Within(.1)); + } + + [Test] + public async Task GeoDist_on_NonExistingMember_returns_NaN() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var distance = await redis.GeoDistAsync("Sicily", "Palermo", "NonExistingMember"); + Assert.That(distance, Is.EqualTo(double.NaN)); + } + + [Test] + public async Task Can_GeoHash() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var hashes = await redis.GeoHashAsync("Sicily", new[] { "Palermo", "Catania" }); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.EqualTo("sqdtr74hyu0")); + + hashes = await redis.GeoHashAsync("Sicily", new[] { "Palermo", "NonExistingMember", "Catania" }); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.Null); + Assert.That(hashes[2], Is.EqualTo("sqdtr74hyu0")); + } + + [Test] + public async Task Can_GeoRadius_default() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.Null); + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.Null); + } + + [Test] + public async Task Can_GeoRadiusByMember_default() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusByMemberAsync("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.Null); + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.Null); + } + + [Test] + public async Task Can_GeoRadius_WithCoord() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, withCoords: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + 
Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + } + + [Test] + public async Task Can_GeoRadius_WithDist() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, withDist: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Distance, Is.EqualTo(56.4413).Within(.1)); + } + + [Test] + public async Task Can_GeoRadius_WithCoord_WithDist_WithHash() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479447370796909)); + } + + [Test] + public async Task Can_GeoRadiusByMember_WithCoord_WithDist_WithHash() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusByMemberAsync("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.583333).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.316667).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(0)); + Assert.That(results[0].Hash, Is.EqualTo(3479030013248308)); + + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(38.115556).Within(.1)); + 
Assert.That(results[1].Distance, Is.EqualTo(90.9778).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479099956230698)); + } + + [Test] + public async Task Can_GeoRadius_WithCoord_WithDist_WithHash_Count_and_Asc() + { + await redis.FlushDbAsync(); + await redis.GeoAddAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true, count:1, asc:false); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + results = await redis.GeoRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + withCoords: true, withDist: true, withHash: true, count: 1, asc: true); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Catania")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479447370796909)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.cs b/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.cs index 27c65e17..bace21cf 100644 --- a/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisGeoNativeClientTests.cs @@ -4,18 +4,18 @@ namespace ServiceStack.Redis.Tests { [TestFixture] - [Explicit, Ignore("CI requires redis-server v3.2.0")] + [Ignore("CI requires redis-server v3.2.0")] public class RedisGeoNativeClientTests { private readonly RedisNativeClient redis; public RedisGeoNativeClientTests() { - redis = new RedisNativeClient("10.0.0.121"); + redis = new RedisNativeClient(TestConfig.GeoHost); } - [TestFixtureTearDown] - public void TestFixtureTearDown() + [OneTimeTearDown] + public void OneTimeTearDown() { redis.Dispose(); } diff --git a/tests/ServiceStack.Redis.Tests/RedisGeoTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisGeoTests.Async.cs new file mode 100644 index 00000000..f06280e2 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisGeoTests.Async.cs @@ -0,0 +1,242 @@ +using NUnit.Framework; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Async")] + [Ignore("CI requires redis-server v3.2.0")] + public class RedisGeoTestsAsync + { + private readonly IRedisClientAsync redis; + + public RedisGeoTestsAsync() + { + redis = new RedisClient(TestConfig.GeoHost); + } + + [OneTimeTearDown] + public async Task OneTimeTearDown() + { + if (redis is object) + { + await redis.DisposeAsync(); + } + } + + [Test] + public async Task Can_AddGeoMember_and_GetGeoCoordinates() + { + await redis.FlushDbAsync(); + var count = await redis.AddGeoMemberAsync("Sicily", 13.361389, 38.115556, "Palermo"); + Assert.That(count, Is.EqualTo(1)); + var results = await redis.GetGeoCoordinatesAsync("Sicily", new[] { "Palermo" }); + + 
Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + } + + [Test] + public async Task GetGeoCoordinates_on_NonExistingMember_returns_no_results() + { + await redis.FlushDbAsync(); + var count = await redis.AddGeoMemberAsync("Sicily", 13.361389, 38.115556, "Palermo"); + var results = await redis.GetGeoCoordinatesAsync("Sicily", new[] { "NonExistingMember" }); + Assert.That(results.Count, Is.EqualTo(0)); + + results = await redis.GetGeoCoordinatesAsync("Sicily", new[] { "Palermo", "NonExistingMember" }); + Assert.That(results.Count, Is.EqualTo(1)); + } + + [Test] + public async Task Can_AddGeoMembers_and_GetGeoCoordinates_multiple() + { + await redis.FlushDbAsync(); + var count = await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + Assert.That(count, Is.EqualTo(2)); + + var results = await redis.GetGeoCoordinatesAsync("Sicily", new[] { "Palermo", "Catania" }); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Member, Is.EqualTo("Catania")); + } + + [Test] + public async Task Can_CalculateDistanceBetweenGeoMembers() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var distance = await redis.CalculateDistanceBetweenGeoMembersAsync("Sicily", "Palermo", "Catania"); + Assert.That(distance, Is.EqualTo(166274.15156960039).Within(.1)); + } + + [Test] + public async Task CalculateDistanceBetweenGeoMembers_on_NonExistingMember_returns_NaN() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var distance = await redis.CalculateDistanceBetweenGeoMembersAsync("Sicily", "Palermo", "NonExistingMember"); + Assert.That(distance, Is.EqualTo(double.NaN)); + } + + [Test] + public async Task Can_GetGeohashes() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var hashes = await redis.GetGeohashesAsync("Sicily", new[] { "Palermo", "Catania" }); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.EqualTo("sqdtr74hyu0")); + + hashes = await redis.GetGeohashesAsync("Sicily", new[] { "Palermo", "NonExistingMember", "Catania" }); + Assert.That(hashes[0], Is.EqualTo("sqc8b49rny0")); + Assert.That(hashes[1], Is.Null); + Assert.That(hashes[2], Is.EqualTo("sqdtr74hyu0")); + } + + [Test] + public async Task Can_FindGeoMembersInRadius() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.FindGeoMembersInRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers); + + 
Assert.That(results.Length, Is.EqualTo(2)); + Assert.That(results[0], Is.EqualTo("Palermo")); + Assert.That(results[1], Is.EqualTo("Catania")); + } + + //[Test] // method does not exist on IRedisClient/IRedisClientAsync + //public async Task Can_GeoRadiusByMember() + //{ + // await redis.FlushDbAsync(); + // await redis.AddGeoMembersAsync("Sicily", new[] { + // new RedisGeo(13.583333, 37.316667, "Agrigento"), + // new RedisGeo(13.361389, 38.115556, "Palermo"), + // new RedisGeo(15.087269, 37.502669, "Catania") + // }); + + // var results = await redis.GeoRadiusByMemberAsync("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers); + + // Assert.That(results.Count, Is.EqualTo(2)); + // Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + // Assert.That(results[0].Unit, Is.Null); + // Assert.That(results[1].Member, Is.EqualTo("Palermo")); + // Assert.That(results[1].Unit, Is.Null); + //} + + [Test] + public async Task Can_FindGeoResultsInRadius() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.FindGeoResultsInRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + Assert.That(results[1].Member, Is.EqualTo("Catania")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479447370796909)); + } + + [Test] + public async Task Can_FindGeoResultsInRadius_by_Member() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new RedisGeo(13.583333, 37.316667, "Agrigento"), + new RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.FindGeoResultsInRadiusAsync("Sicily", "Agrigento", 100, RedisGeoUnit.Kilometers); + + Assert.That(results.Count, Is.EqualTo(2)); + Assert.That(results[0].Member, Is.EqualTo("Agrigento")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.583333).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.316667).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(0)); + Assert.That(results[0].Hash, Is.EqualTo(3479030013248308)); + + Assert.That(results[1].Member, Is.EqualTo("Palermo")); + Assert.That(results[1].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[1].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[1].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[1].Distance, Is.EqualTo(90.9778).Within(.1)); + Assert.That(results[1].Hash, Is.EqualTo(3479099956230698)); + } + + [Test] + public async Task Can_GeoRadius_WithCoord_WithDist_WithHash_Count_and_Asc() + { + await redis.FlushDbAsync(); + await redis.AddGeoMembersAsync("Sicily", new[] { + new 
RedisGeo(13.361389, 38.115556, "Palermo"), + new RedisGeo(15.087269, 37.502669, "Catania") + }); + + var results = await redis.FindGeoResultsInRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + count: 1, sortByNearest: false); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Palermo")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(13.361389).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(38.115556).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(190.4424).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479099956230698)); + + results = await redis.FindGeoResultsInRadiusAsync("Sicily", 15, 37, 200, RedisGeoUnit.Kilometers, + count: 1, sortByNearest: true); + + Assert.That(results.Count, Is.EqualTo(1)); + Assert.That(results[0].Member, Is.EqualTo("Catania")); + Assert.That(results[0].Unit, Is.EqualTo(RedisGeoUnit.Kilometers)); + Assert.That(results[0].Longitude, Is.EqualTo(15.087269).Within(.1)); + Assert.That(results[0].Latitude, Is.EqualTo(37.502669).Within(.1)); + Assert.That(results[0].Distance, Is.EqualTo(56.4413).Within(.1)); + Assert.That(results[0].Hash, Is.EqualTo(3479447370796909)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisGeoTests.cs b/tests/ServiceStack.Redis.Tests/RedisGeoTests.cs index 3968e530..32248658 100644 --- a/tests/ServiceStack.Redis.Tests/RedisGeoTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisGeoTests.cs @@ -4,18 +4,18 @@ namespace ServiceStack.Redis.Tests { [TestFixture] - [Explicit, Ignore("CI requires redis-server v3.2.0")] + [Ignore("CI requires redis-server v3.2.0")] public class RedisGeoTests { private readonly RedisClient redis; public RedisGeoTests() { - redis = new RedisClient("10.0.0.121"); + redis = new RedisClient(TestConfig.GeoHost); } - [TestFixtureTearDown] - public void TestFixtureTearDown() + [OneTimeTearDown] + public void OneTimeTearDown() { redis.Dispose(); } diff --git a/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.Async.cs new file mode 100644 index 00000000..31748b6e --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.Async.cs @@ -0,0 +1,36 @@ +using NUnit.Framework; +using System; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Ignore("Integration"), Category("Async")] + public class RedisHyperLogTestsAsync + { + const string Host = "localhost"; // "10.0.0.14" + private IRedisClientAsync Connect() => new RedisClient(Host); + + [Test] + public async Task Can_Add_to_Hyperlog() + { + await using var redis = Connect(); + + await redis.FlushAllAsync(); + + await redis.AddToHyperLogAsync("hyperlog", new[] { "a", "b", "c" }); + await redis.AddToHyperLogAsync("hyperlog", new[] { "c", "d" }); + + var count = await redis.CountHyperLogAsync("hyperlog"); + + Assert.That(count, Is.EqualTo(4)); + + await redis.AddToHyperLogAsync("hyperlog2", new[] { "c", "d", "e", "f" }); + + await redis.MergeHyperLogsAsync("hypermerge", new[] { "hyperlog", "hyperlog2" }); + + var mergeCount = await redis.CountHyperLogAsync("hypermerge"); + + Assert.That(mergeCount, Is.EqualTo(6)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.cs b/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.cs index a504a780..7a6af4d6 100644 --- a/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.cs +++ 
b/tests/ServiceStack.Redis.Tests/RedisHyperLogTests.cs @@ -4,7 +4,7 @@ namespace ServiceStack.Redis.Tests { - [TestFixture, Explicit] + [TestFixture, Ignore("Integration")] public class RedisHyperLogTests { [Test] diff --git a/tests/ServiceStack.Redis.Tests/RedisManagerPoolTests.cs b/tests/ServiceStack.Redis.Tests/RedisManagerPoolTests.cs index e34c0ad4..a32f69aa 100644 --- a/tests/ServiceStack.Redis.Tests/RedisManagerPoolTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisManagerPoolTests.cs @@ -24,14 +24,14 @@ public class RedisManagerPoolTests private string firstReadWriteHost; private string firstReadOnlyHost; - [TestFixtureSetUp] - public void TestFixtureSetUp() + [OneTimeSetUp] + public void OneTimeSetUp() { RedisConfig.VerifyMasterConnections = false; } - [TestFixtureTearDown] - public void TestFixtureTearDown() + [OneTimeTearDown] + public void OneTimeTearDown() { RedisConfig.VerifyMasterConnections = true; } @@ -215,8 +215,8 @@ public void Can_support_64_threads_using_the_client_simultaneously() var hostCount = 0; foreach (var entry in clientUsageMap) { - Assert.That(entry.Value, Is.GreaterThanOrEqualTo(5), "Host has unproportianate distrobution: " + entry.Value); - Assert.That(entry.Value, Is.LessThanOrEqualTo(30), "Host has unproportianate distrobution: " + entry.Value); + Assert.That(entry.Value, Is.GreaterThanOrEqualTo(5), "Host has unproportionate distribution: " + entry.Value); + Assert.That(entry.Value, Is.LessThanOrEqualTo(30), "Host has unproportionate distribution: " + entry.Value); hostCount += entry.Value; } diff --git a/tests/ServiceStack.Redis.Tests/RedisPasswordTests.cs b/tests/ServiceStack.Redis.Tests/RedisPasswordTests.cs index ba3f38f3..db10e714 100644 --- a/tests/ServiceStack.Redis.Tests/RedisPasswordTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisPasswordTests.cs @@ -1,50 +1,53 @@ -using NUnit.Framework; +using System; +using NUnit.Framework; namespace ServiceStack.Redis.Tests { [TestFixture] public class RedisPasswordTests { - - [Explicit("Integration")] + [Ignore("Integration")] [Test] - public void Can_connect_to_Slaves_and_Masters_with_Password() + public void Can_connect_to_Replicas_and_Masters_with_Password() { var factory = new PooledRedisClientManager( - readWriteHosts: new[] { "pass@10.0.0.59:6379" }, - readOnlyHosts: new[] { "pass@10.0.0.59:6380" }); + readWriteHosts: new[] {"pass@10.0.0.59:6379"}, + readOnlyHosts: new[] {"pass@10.0.0.59:6380"}); - using (var readWrite = factory.GetClient()) - using (var readOnly = factory.GetReadOnlyClient()) - { - readWrite.SetEntry("Foo", "Bar"); - var value = readOnly.GetEntry("Foo"); + using var readWrite = factory.GetClient(); + using var readOnly = factory.GetReadOnlyClient(); + readWrite.SetValue("Foo", "Bar"); + var value = readOnly.GetValue("Foo"); - Assert.That(value, Is.EqualTo("Bar")); - } + Assert.That(value, Is.EqualTo("Bar")); } [Test] public void Passwords_are_not_leaked_in_exception_messages() - { + { const string password = "yesterdayspassword"; - Assert.Throws(() => { - try - { - var factory = new PooledRedisClientManager(password + "@" + TestConfig.SingleHost); // redis will throw when using password and it's not configured - using (var redis = factory.GetClient()) + Assert.Throws(() => { + try + { + var connString = password + "@" + TestConfig.SingleHost + "?RetryTimeout=2000"; + // redis will throw when using password and it's not configured + var factory = new PooledRedisClientManager(connString); + using var redis = factory.GetClient(); + redis.SetValue("Foo", "Bar"); + } + catch 
(RedisResponseException ex) + { + Assert.That(ex.Message, Is.Not.Contains(password)); + throw; + } + catch (TimeoutException tex) { - redis.SetEntry("Foo", "Bar"); + Assert.That(tex.InnerException.Message, Is.Not.Contains(password)); + throw tex.InnerException; } - } - catch (RedisResponseException ex) - { - Assert.That(ex.Message, Is.Not.StringContaining(password)); - throw; - } - }, - "Expected an exception after Redis AUTH command; try using a password that doesn't match."); + }, + "Expected an exception after Redis AUTH command; try using a password that doesn't match."); } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.Async.cs new file mode 100644 index 00000000..1f0e5e42 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPersistenceProviderTests.Async.cs @@ -0,0 +1,65 @@ +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration"), Category("Async")] + public class RedisPersistenceProviderTestsAsync + { + [Test] + public async Task Can_Store_and_GetById_ModelWithIdAndName() + { + await using IRedisClientAsync redis = new RedisClient(TestConfig.SingleHost); + const int modelId = 1; + var to = ModelWithIdAndName.Create(modelId); + await redis.StoreAsync(to); + + var from = await redis.GetByIdAsync(modelId); + + ModelWithIdAndName.AssertIsEqual(to, from); + } + + [Test] + public async Task Can_StoreAll_and_GetByIds_ModelWithIdAndName() + { + await using IRedisClientAsync redis = new RedisClient(TestConfig.SingleHost); + + var ids = new[] { 1, 2, 3, 4, 5 }; + var tos = ids.Map(ModelWithIdAndName.Create); + + await redis.StoreAllAsync(tos); + + var froms = await redis.GetByIdsAsync(ids); + var fromIds = froms.Map(x => x.Id); + + Assert.That(fromIds, Is.EquivalentTo(ids)); + } + + [Test] + public async Task Can_Delete_ModelWithIdAndName() + { + await using IRedisClientAsync redis = new RedisClient(TestConfig.SingleHost); + var ids = new List { 1, 2, 3, 4, 5 }; + var tos = ids.ConvertAll(ModelWithIdAndName.Create); + + await redis.StoreAllAsync(tos); + + var deleteIds = new List { 2, 4 }; + + await redis.DeleteByIdsAsync(deleteIds); + + var froms = await redis.GetByIdsAsync(ids); + var fromIds = froms.Map(x => x.Id); + + var expectedIds = ids.Where(x => !deleteIds.Contains(x)).ToList(); + + Assert.That(fromIds, Is.EquivalentTo(expectedIds)); + } + + } + +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.Async.cs new file mode 100644 index 00000000..4c0a707a --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPipelineCommonTests.Async.cs @@ -0,0 +1,73 @@ +using NUnit.Framework; +using ServiceStack.Text; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisPipelineCommonTestsAsync + : RedisClientTestsBaseAsync + { + [Test] + public async Task Can_Set_and_Expire_key_in_atomic_transaction() + { + var oneSec = TimeSpan.FromSeconds(1); + + Assert.That(await RedisAsync.GetValueAsync("key"), Is.Null); + await using (var trans = RedisAsync.CreatePipeline()) //Calls 'MULTI' + { + trans.QueueCommand(r => 
r.SetValueAsync("key", "a")); //Queues 'SET key a' + trans.QueueCommand(r => r.ExpireEntryInAsync("key", oneSec)); //Queues 'EXPIRE key 1' + + await trans.FlushAsync(); //Calls 'EXEC' + + } //Calls 'DISCARD' if 'EXEC' wasn't called + + Assert.That(await RedisAsync.GetValueAsync("key"), Is.EqualTo("a")); + await Task.Delay(TimeSpan.FromSeconds(2)); + Assert.That(await RedisAsync.GetValueAsync("key"), Is.Null); + } + + [Test] + public async Task Can_SetAll_and_Publish_in_atomic_transaction() + { + var messages = new Dictionary { { "a", "a" }, { "b", "b" } }; + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(c => c.SetAllAsync(messages.ToDictionary(t => t.Key, t => t.Value))); + pipeline.QueueCommand(c => c.PublishMessageAsync("uc", "b")); + + await pipeline.FlushAsync(); + } + + [Test] + public async Task Can_Pop_priority_message_from_SortedSet_and_Add_to_workq_in_atomic_transaction() + { + var messages = new List { "message4", "message3", "message2" }; + + await RedisAsync.AddItemToListAsync("workq", "message1"); + + var priority = 1; + await messages.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync("prioritymsgs", x, priority++)); + + var highestPriorityMessage = await RedisAsync.PopItemWithHighestScoreFromSortedSetAsync("prioritymsgs"); + + await using (var trans = RedisAsync.CreatePipeline()) + { + trans.QueueCommand(r => r.RemoveItemFromSortedSetAsync("prioritymsgs", highestPriorityMessage)); + trans.QueueCommand(r => r.AddItemToListAsync("workq", highestPriorityMessage)); + + await trans.FlushAsync(); + } + + Assert.That(await RedisAsync.GetAllItemsFromListAsync("workq"), + Is.EquivalentTo(new List { "message1", "message2" })); + Assert.That(await RedisAsync.GetAllItemsFromSortedSetAsync("prioritymsgs"), + Is.EquivalentTo(new List { "message3", "message4" })); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPipelineTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisPipelineTests.Async.cs new file mode 100644 index 00000000..6eea8ed1 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPipelineTests.Async.cs @@ -0,0 +1,281 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisPipelineTestsAsync + : RedisClientTestsBaseAsync + { + private const string Key = "pipemultitest"; + private const string ListKey = "pipemultitest-list"; + private const string SetKey = "pipemultitest-set"; + private const string SortedSetKey = "pipemultitest-sortedset"; + + public override void OnAfterEachTest() + { + CleanMask = Key + "*"; + base.OnAfterEachTest(); + } + + [Test] + public async Task Can_call_single_operation_in_pipeline() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + var map = new Dictionary(); + pipeline.QueueCommand(r => r.GetAsync(Key).AsValueTask(), y => map[Key] = y); + + await pipeline.FlushAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + } + + [Test] + public async Task No_commit_of_atomic_pipelines_discards_all_commands() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + } + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } 
+ + [Test] + public async Task Exception_in_atomic_pipelines_discards_all_commands() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + try + { + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + throw new NotSupportedException(); + } + catch (NotSupportedException) + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Can_call_single_operation_3_Times_in_pipeline() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + + await pipeline.FlushAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("3")); + } + [Test] + public async Task Can_call_hash_operations_in_pipeline() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + var fields = new[] { "field1", "field2", "field3" }; + var values = new[] { "1", "2", "3" }; + var fieldBytes = new byte[fields.Length][]; + for (int i = 0; i < fields.Length; ++i) + { + fieldBytes[i] = GetBytes(fields[i]); + + } + var valueBytes = new byte[values.Length][]; + for (int i = 0; i < values.Length; ++i) + { + valueBytes[i] = GetBytes(values[i]); + + } + byte[][] members = null; + await using var pipeline = RedisAsync.CreatePipeline(); + + + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).HMSetAsync(Key, fieldBytes, valueBytes)); + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).HGetAllAsync(Key), x => members = x); + + + await pipeline.FlushAsync(); + + + for (var i = 0; i < members.Length; i += 2) + { + Assert.AreEqual(members[i], fieldBytes[i / 2]); + Assert.AreEqual(members[i + 1], valueBytes[i / 2]); + + } + } + + [Test] + public async Task Can_call_multiple_setexs_in_pipeline() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + var keys = new[] { Key + "key1", Key + "key2", Key + "key3" }; + var values = new[] { "1", "2", "3" }; + await using var pipeline = RedisAsync.CreatePipeline(); + + for (int i = 0; i < 3; ++i) + { + int index0 = i; + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync(keys[index0], 100, GetBytes(values[index0]))); + } + + await pipeline.FlushAsync(); + await pipeline.ReplayAsync(); + + + for (int i = 0; i < 3; ++i) + Assert.AreEqual(await RedisAsync.GetValueAsync(keys[i]), values[i]); + } + + [Test] + public async Task Can_call_single_operation_with_callback_3_Times_in_pipeline() + { + var results = new List(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + + await pipeline.FlushAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("3")); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public async Task Supports_different_operation_types_in_same_pipeline() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => 
r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem1")); + pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem2")); + pipeline.QueueCommand(r => r.AddItemToSetAsync(SetKey, "setitem")); + pipeline.QueueCommand(r => r.SetContainsItemAsync(SetKey, "setitem"), b => containsItem = b); + pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem1")); + pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem2")); + pipeline.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem3")); + pipeline.QueueCommand(r => r.GetListCountAsync(ListKey), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSetCountAsync(SetKey), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.GetSortedSetCountAsync(SortedSetKey), intResult => collectionCounts.Add(intResult)); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + + await pipeline.FlushAsync(); + } + + Assert.That(containsItem, Is.True); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + Assert.That(await RedisAsync.GetAllItemsFromListAsync(ListKey), Is.EquivalentTo(new List { "listitem1", "listitem2" })); + Assert.That(await RedisAsync.GetAllItemsFromSetAsync(SetKey), Is.EquivalentTo(new List { "setitem" })); + Assert.That(await RedisAsync.GetAllItemsFromSortedSetAsync(SortedSetKey), Is.EquivalentTo(new List { "sortedsetitem1", "sortedsetitem2", "sortedsetitem3" })); + } + + [Test] + public async Task Can_call_multi_string_operations_in_pipeline() + { + string item1 = null; + string item4 = null; + + var results = new List(); + Assert.That(await RedisAsync.GetListCountAsync(ListKey), Is.EqualTo(0)); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem1")); + pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem2")); + pipeline.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem3")); + pipeline.QueueCommand(r => r.GetAllItemsFromListAsync(ListKey), x => results = x); + pipeline.QueueCommand(r => r.GetItemFromListAsync(ListKey, 0), x => item1 = x); + pipeline.QueueCommand(r => r.GetItemFromListAsync(ListKey, 4), x => item4 = x); + + await pipeline.FlushAsync(); + } + + Assert.That(await RedisAsync.GetListCountAsync(ListKey), Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { "listitem1", "listitem2", "listitem3" })); + Assert.That(item1, Is.EqualTo("listitem1")); + Assert.That(item4, Is.Null); + } + [Test] + // Operations that are not supported in older versions will look at server info to determine what to do. 
+ // If server info is fetched each time, then it will interfere with pipeline + public async Task Can_call_operation_not_supported_on_older_servers_in_pipeline() + { + var temp = new byte[1]; + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync(Key + "key", 5, temp)); + await pipeline.FlushAsync(); + } + [Test] + public async Task Pipeline_can_be_replayed() + { + string KeySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(KeySquared)); + await pipeline.FlushAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + await NativeAsync.DelAsync(Key); + await NativeAsync.DelAsync(KeySquared); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + + await pipeline.ReplayAsync(); + await pipeline.DisposeAsync(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + } + + [Test] + public async Task Pipeline_can_be_contain_watch() + { + string KeySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + await using var pipeline = RedisAsync.CreatePipeline(); + pipeline.QueueCommand(r => r.IncrementValueAsync(Key)); + pipeline.QueueCommand(r => r.IncrementValueAsync(KeySquared)); + pipeline.QueueCommand(r => ((IRedisNativeClientAsync)r).WatchAsync(new[] { Key + "FOO" })); + await pipeline.FlushAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + } + + [Test] + public async Task Can_call_AddRangeToSet_in_pipeline() + { + await using var pipeline = RedisAsync.CreatePipeline(); + var key = "pipeline-test"; + + pipeline.QueueCommand(r => r.RemoveAsync(key).AsValueTask()); + pipeline.QueueCommand(r => r.AddRangeToSetAsync(key, new[] { "A", "B", "C" }.ToList())); + + await pipeline.FlushAsync(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPipelineTests.cs b/tests/ServiceStack.Redis.Tests/RedisPipelineTests.cs index d9024b41..037f013e 100644 --- a/tests/ServiceStack.Redis.Tests/RedisPipelineTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisPipelineTests.cs @@ -59,7 +59,7 @@ public void Exception_in_atomic_pipelines_discards_all_commands() throw new NotSupportedException(); } } - catch (NotSupportedException ignore) + catch (NotSupportedException) { Assert.That(Redis.GetValue(Key), Is.Null); } diff --git a/tests/ServiceStack.Redis.Tests/RedisPubSubServerTests.cs b/tests/ServiceStack.Redis.Tests/RedisPubSubServerTests.cs index 2b51c6ea..4984228c 100644 --- a/tests/ServiceStack.Redis.Tests/RedisPubSubServerTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisPubSubServerTests.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Generic; using System.Threading; using NUnit.Framework; using ServiceStack.Text; @@ -9,16 +10,26 @@ namespace ServiceStack.Redis.Tests [TestFixture] public class RedisPubSubServerTests { - private static RedisPubSubServer CreatePubSubServer( - 
int intervalSecs = 1, int timeoutSecs = 3) + RedisManagerPool clientsManager = new RedisManagerPool(TestConfig.MasterHosts); + + [OneTimeTearDown] + public void OneTimeTearDown() + { + clientsManager.Dispose(); + } + + private RedisPubSubServer CreatePubSubServer( + int intervalSecs = 1, int timeoutSecs = 3, params string[] channels) { - var clientsManager = new RedisManagerPool(TestConfig.MasterHosts); using (var redis = clientsManager.GetClient()) redis.FlushAll(); + + if (channels.Length == 0) + channels = new[] {"topic:test"}; var pubSub = new RedisPubSubServer( clientsManager, - "topic:test") + channels) { HeartbeatInterval = TimeSpan.FromSeconds(intervalSecs), HeartbeatTimeout = TimeSpan.FromSeconds(timeoutSecs) @@ -97,5 +108,42 @@ public void Does_send_heartbeat_pulses_to_multiple_PubSubServers() pubSubs.Each(x => x.Dispose()); } + + [Test] + public void Can_restart_and_subscribe_to_more_channels() + { + var a = new List(); + var b = new List(); + var pubSub = CreatePubSubServer(intervalSecs: 20, timeoutSecs: 30, "topic:a"); + pubSub.OnMessage = (channel, msg) => { + if (channel == "topic:a") + a.Add(msg); + else if (channel == "topic:b") + b.Add(msg); + }; + pubSub.Start(); + Thread.Sleep(100); + + var client = clientsManager.GetClient(); + var i = 0; + client.PublishMessage("topic:a", $"msg: ${++i}"); + client.PublishMessage("topic:b", $"msg: ${++i}"); + + Thread.Sleep(100); + Assert.That(a.Count, Is.EqualTo(1)); + Assert.That(b.Count, Is.EqualTo(0)); + + pubSub.Channels = new[] {"topic:a", "topic:b"}; + pubSub.Restart(); + Thread.Sleep(100); + + client.PublishMessage("topic:a", $"msg: ${++i}"); + client.PublishMessage("topic:b", $"msg: ${++i}"); + + + Thread.Sleep(100); + Assert.That(a.Count, Is.EqualTo(2)); + Assert.That(b.Count, Is.EqualTo(1)); + } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisPubSubTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisPubSubTests.Async.cs new file mode 100644 index 00000000..b8865dc6 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisPubSubTests.Async.cs @@ -0,0 +1,293 @@ +using NUnit.Framework; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration")] + public class RedisPubSubTestsAsync + : RedisClientTestsBaseAsync + { + public override void OnBeforeEachTest() + { + base.OnBeforeEachTest(); + RedisRaw.NamespacePrefix = "RedisPubSubTests"; + } + + [Test] + public async Task Can_Subscribe_and_Publish_single_message() + { + var channelName = PrefixedKey("CHANNEL1"); + const string message = "Hello, World!"; + var key = PrefixedKey("Can_Subscribe_and_Publish_single_message"); + + await RedisAsync.IncrementValueAsync(key); + + await using (var subscription = await RedisAsync.CreateSubscriptionAsync()) + { + subscription.OnSubscribeAsync += channel => + { + Log("Subscribed to '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + return default; + }; + subscription.OnUnSubscribeAsync += channel => + { + Log("UnSubscribed from '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + return default; + }; + subscription.OnMessageAsync += async (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelName)); + Assert.That(msg, Is.EqualTo(message)); + await subscription.UnSubscribeFromAllChannelsAsync(); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await 
Task.Delay(100); // to be sure that we have subscribers + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + Log("Publishing '{0}' to '{1}'", message, channelName); + await redisClient.PublishMessageAsync(channelName, message); + }); + + Log("Start Listening On " + channelName); + await subscription.SubscribeToChannelsAsync(new[] { channelName }); //blocking + } + + Log("Using as normal client again..."); + await RedisAsync.IncrementValueAsync(key); + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo(2)); + } + + [Test] + public async Task Can_Subscribe_and_Publish_single_message_using_wildcard() + { + var channelWildcard = PrefixedKey("CHANNEL.*"); + var channelName = PrefixedKey("CHANNEL.1"); + const string message = "Hello, World!"; + var key = PrefixedKey("Can_Subscribe_and_Publish_single_message"); + + await RedisAsync.IncrementValueAsync(key); + + await using (var subscription = await RedisAsync.CreateSubscriptionAsync()) + { + subscription.OnSubscribeAsync += channel => + { + Log("Subscribed to '{0}'", channelWildcard); + Assert.That(channel, Is.EqualTo(channelWildcard)); + return default; + }; + subscription.OnUnSubscribeAsync += channel => + { + Log("UnSubscribed from '{0}'", channelWildcard); + Assert.That(channel, Is.EqualTo(channelWildcard)); + return default; + }; + subscription.OnMessageAsync += async (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelName)); + Assert.That(msg, Is.EqualTo(message), "we should get the message, not the channel"); + await subscription.UnSubscribeFromChannelsMatchingAsync(new string[0]); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + Log("Publishing '{0}' to '{1}'", message, channelName); + await redisClient.PublishMessageAsync(channelName, message); + }); + + Log("Start Listening On " + channelName); + await subscription.SubscribeToChannelsMatchingAsync(new[] { channelWildcard }); //blocking + } + + Log("Using as normal client again..."); + await RedisAsync.IncrementValueAsync(key); + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo(2)); + } + + [Test] + public async Task Can_Subscribe_and_Publish_multiple_message() + { + var channelName = PrefixedKey("CHANNEL2"); + const string messagePrefix = "MESSAGE "; + string key = PrefixedKey("Can_Subscribe_and_Publish_multiple_message"); + const int publishMessageCount = 5; + var messagesReceived = 0; + + await RedisAsync.IncrementValueAsync(key); + + await using (var subscription = await RedisAsync.CreateSubscriptionAsync()) + { + subscription.OnSubscribeAsync += channel => + { + Log("Subscribed to '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + return default; + }; + subscription.OnUnSubscribeAsync += channel => + { + Log("UnSubscribed from '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelName)); + return default; + }; + subscription.OnMessageAsync += async (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelName)); + Assert.That(msg, Is.EqualTo(messagePrefix + messagesReceived++)); + + if (messagesReceived == publishMessageCount) + { + await subscription.UnSubscribeFromAllChannelsAsync(); + } + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + + await using var redisClient = 
CreateRedisClient().ForAsyncOnly(); + for (var i = 0; i < publishMessageCount; i++) + { + var message = messagePrefix + i; + Log("Publishing '{0}' to '{1}'", message, channelName); + await redisClient.PublishMessageAsync(channelName, message); + } + }); + + Log("Start Listening On"); + await subscription.SubscribeToChannelsAsync(new[] { channelName }); //blocking + } + + Log("Using as normal client again..."); + await RedisAsync.IncrementValueAsync(key); + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo(2)); + + Assert.That(messagesReceived, Is.EqualTo(publishMessageCount)); + } + + [Test] + public async Task Can_Subscribe_and_Publish_message_to_multiple_channels() + { + var channelPrefix = PrefixedKey("CHANNEL3 "); + const string message = "MESSAGE"; + const int publishChannelCount = 5; + var key = PrefixedKey("Can_Subscribe_and_Publish_message_to_multiple_channels"); + + var channels = new List(); + publishChannelCount.Times(i => channels.Add(channelPrefix + i)); + + var messagesReceived = 0; + var channelsSubscribed = 0; + var channelsUnSubscribed = 0; + + await RedisAsync.IncrementValueAsync(key); + + await using (var subscription = await RedisAsync.CreateSubscriptionAsync()) + { + subscription.OnSubscribeAsync += channel => + { + Log("Subscribed to '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelPrefix + channelsSubscribed++)); + return default; + }; + subscription.OnUnSubscribeAsync += channel => + { + Log("UnSubscribed from '{0}'", channel); + Assert.That(channel, Is.EqualTo(channelPrefix + channelsUnSubscribed++)); + return default; + }; + subscription.OnMessageAsync += async (channel, msg) => + { + Log("Received '{0}' from channel '{1}'", msg, channel); + Assert.That(channel, Is.EqualTo(channelPrefix + messagesReceived++)); + Assert.That(msg, Is.EqualTo(message)); + + await subscription.UnSubscribeFromChannelsAsync(new[] { channel }); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + foreach (var channel in channels) + { + Log("Publishing '{0}' to '{1}'", message, channel); + await redisClient.PublishMessageAsync(channel, message); + } + }); + + Log("Start Listening On"); + await subscription.SubscribeToChannelsAsync(channels.ToArray()); //blocking + } + + Log("Using as normal client again..."); + await RedisAsync.IncrementValueAsync(key); + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo(2)); + + Assert.That(messagesReceived, Is.EqualTo(publishChannelCount)); + Assert.That(channelsSubscribed, Is.EqualTo(publishChannelCount)); + Assert.That(channelsUnSubscribed, Is.EqualTo(publishChannelCount)); + } + + [Test] + public async Task Can_Subscribe_to_channel_pattern() + { + int msgs = 0; + await using var subscription = await RedisAsync.CreateSubscriptionAsync(); + subscription.OnMessageAsync += async (channel, msg) => + { + Debug.WriteLine(String.Format("{0}: {1}", channel, msg + msgs++)); + await subscription.UnSubscribeFromChannelsMatchingAsync(new[] { PrefixedKey("CHANNEL4:TITLE*") }); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + Log("Publishing msg..."); + await redisClient.PublishMessageAsync(PrefixedKey("CHANNEL4:TITLE1"), "hello"); // .ToUtf8Bytes() + }); + + Log("Start Listening On"); + await subscription.SubscribeToChannelsMatchingAsync(new[] { 
PrefixedKey("CHANNEL4:TITLE*") }); + } + + [Test] + public async Task Can_Subscribe_to_multiplechannel_pattern() + { + var channels = new[] { PrefixedKey("CHANNEL5:TITLE*"), PrefixedKey("CHANNEL5:BODY*") }; + int msgs = 0; + await using var subscription = await RedisAsync.CreateSubscriptionAsync(); + subscription.OnMessageAsync += async (channel, msg) => + { + Debug.WriteLine(String.Format("{0}: {1}", channel, msg + msgs++)); + await subscription.UnSubscribeFromChannelsMatchingAsync(channels); + }; + + ThreadPool.QueueUserWorkItem(async x => + { + await Task.Delay(100); // to be sure that we have subscribers + + await using var redisClient = CreateRedisClient().ForAsyncOnly(); + Log("Publishing msg..."); + await redisClient.PublishMessageAsync(PrefixedKey("CHANNEL5:BODY"), "hello"); // .ToUtf8Bytes() + }); + + Log("Start Listening On"); + await subscription.SubscribeToChannelsMatchingAsync(channels); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisScanTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisScanTests.Async.cs new file mode 100644 index 00000000..b5a79762 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisScanTests.Async.cs @@ -0,0 +1,173 @@ +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisScanTestsAsync + : RedisClientTestsBaseAsync + { + [Test] + public async Task Can_scan_10_collection() + { + await RedisAsync.FlushAllAsync(); + var keys = 10.Times(x => "KEY" + x); + await RedisAsync.SetAllAsync(keys.ToSafeDictionary(x => x)); + + var ret = await NativeAsync.ScanAsync(0); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(ret.AsStrings(), Is.EquivalentTo(keys)); + } + + [Test] + public async Task Can_scan_100_collection_over_cursor() + { + var allKeys = new HashSet(); + await RedisAsync.FlushAllAsync(); + var keys = 100.Times(x => "KEY" + x); + await RedisAsync.SetAllAsync(keys.ToSafeDictionary(x => x)); + + var i = 0; + var ret = new ScanResult(); + while (true) + { + ret = await NativeAsync.ScanAsync(ret.Cursor, 10); + i++; + ret.AsStrings().ForEach(x => allKeys.Add(x)); + if (ret.Cursor == 0) break; + } + + Assert.That(i, Is.GreaterThanOrEqualTo(2)); + Assert.That(allKeys.Count, Is.EqualTo(keys.Count)); + Assert.That(allKeys, Is.EquivalentTo(keys)); + } + + [Test] + public async Task Can_scan_and_search_10_collection() + { + await RedisAsync.FlushAllAsync(); + var keys = 11.Times(x => "KEY" + x); + await RedisAsync.SetAllAsync(keys.ToSafeDictionary(x => x)); + + var ret = await NativeAsync.ScanAsync(0, 11, match: "KEY1*"); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(ret.AsStrings(), Is.EquivalentTo(new[] { "KEY1", "KEY10" })); + } + + [Test] + public async Task Can_SScan_10_sets() + { + await RedisAsync.FlushAllAsync(); + var items = 10.Times(x => "item" + x); + await items.ForEachAsync(async x => await RedisAsync.AddItemToSetAsync("scanset", x)); + + var ret = await NativeAsync.SScanAsync("scanset", 0); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(ret.AsStrings(), Is.EquivalentTo(items)); + } + + [Test] + public async Task Can_ZScan_10_sortedsets() + { + await RedisAsync.FlushAllAsync(); + var items = 10.Times(x => "item" + x); + var i = 0; + await items.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync("scanzset", x, i++)); + + var ret = await 
NativeAsync.ZScanAsync("scanzset", 0); + var itemsWithScore = ret.AsItemsWithScores(); + + Assert.That(itemsWithScore.Keys, Is.EqualTo(items)); + Assert.That(itemsWithScore.Values, Is.EqualTo(10.Times(x => (double)x))); + } + + [Test] + public async Task Can_HScan_10_hashes() + { + await RedisAsync.FlushAllAsync(); + var values = 10.Times(x => "VALUE" + x); + await RedisAsync.SetRangeInHashAsync("scanhash", values.ToSafeDictionary(x => x.Replace("VALUE", "KEY"))); + + var ret = await NativeAsync.HScanAsync("scanhash", 0); + + var keyValues = ret.AsKeyValues(); + + Assert.That(ret.Cursor, Is.GreaterThanOrEqualTo(0)); + Assert.That(keyValues.Keys, Is.EquivalentTo(values.ConvertAll(x => x.Replace("VALUE", "KEY")))); + Assert.That(keyValues.Values, Is.EquivalentTo(values)); + } + + [Test] + public async Task Does_lazy_scan_all_keys() + { + await RedisAsync.FlushAllAsync(); + var keys = 100.Times(x => "KEY" + x); + await RedisAsync.SetAllAsync(keys.ToSafeDictionary(x => x)); + + var scanAllKeys = RedisAsync.ScanAllKeysAsync(pageSize: 10); + var tenKeys = await scanAllKeys.TakeAsync(10).ToListAsync(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(await scanAllKeys.CountAsync(), Is.EqualTo(100)); + } + + [Test] + public async Task Does_lazy_scan_all_set_items() + { + await RedisAsync.FlushAllAsync(); + var items = 100.Times(x => "item" + x); + await items.ForEachAsync(async x => await RedisAsync.AddItemToSetAsync("scanset", x)); + + var scanAllItems = RedisAsync.ScanAllSetItemsAsync("scanset", pageSize: 10); + var tenKeys = await scanAllItems.TakeAsync(10).ToListAsync(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(await scanAllItems.CountAsync(), Is.EqualTo(100)); + } + + [Test] + public async Task Does_lazy_scan_all_sortedset_items() + { + await RedisAsync.FlushAllAsync(); + var items = 100.Times(x => "item" + x); + var i = 0; + await items.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync("scanzset", x, i++)); + + var scanAllItems = RedisAsync.ScanAllSortedSetItemsAsync("scanzset", pageSize: 10); + var tenKeys = await scanAllItems.TakeAsync(10).ToListAsync(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(await scanAllItems.CountAsync(), Is.EqualTo(100)); + + var map = await scanAllItems.ToDictionaryAsync(x => x.Key, x => x.Value); + Assert.That(map.Keys, Is.EquivalentTo(items)); + } + + [Test] + public async Task Does_lazy_scan_all_hash_items() + { + await RedisAsync.FlushAllAsync(); + var values = 100.Times(x => "VALUE" + x); + await RedisAsync.SetRangeInHashAsync("scanhash", values.ToSafeDictionary(x => x.Replace("VALUE", "KEY"))); + + var scanAllItems = RedisAsync.ScanAllHashEntriesAsync("scanhash", pageSize: 10); + var tenKeys = await scanAllItems.TakeAsync(10).ToListAsync(); + + Assert.That(tenKeys.Count, Is.EqualTo(10)); + + Assert.That(await scanAllItems.CountAsync(), Is.EqualTo(100)); + + var map = await scanAllItems.ToDictionaryAsync(x => x.Key, x => x.Value); + Assert.That(map.Values, Is.EquivalentTo(values)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisStatsTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisStatsTests.Async.cs new file mode 100644 index 00000000..2fd145ef --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisStatsTests.Async.cs @@ -0,0 +1,43 @@ +using System.Collections.Generic; +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisStatsTestsAsync + : 
RedisClientTestsBaseAsync + { + [OneTimeSetUp] + public void OneTimeSetUp() + { + RedisConfig.AssumeServerVersion = 2821; + } + + [Test] + [Ignore("too long")] + public async Task Batch_and_Pipeline_requests_only_counts_as_1_request() + { + var reqCount = RedisNativeClient.RequestsPerHour; + + var map = new Dictionary(); + 10.Times(i => map["key" + i] = "value" + i); + + await RedisAsync.SetValuesAsync(map); + + Assert.That(RedisNativeClient.RequestsPerHour, Is.EqualTo(reqCount + 1)); + + var keyTypes = new Dictionary(); + await using (var pipeline = RedisAsync.CreatePipeline()) + { + map.Keys.Each(key => + pipeline.QueueCommand(r => r.TypeAsync(key), x => keyTypes[key] = x)); + + await pipeline.FlushAsync(); + } + + Assert.That(RedisNativeClient.RequestsPerHour, Is.EqualTo(reqCount + 2)); + Assert.That(keyTypes.Count, Is.EqualTo(map.Count)); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisStatsTests.cs b/tests/ServiceStack.Redis.Tests/RedisStatsTests.cs index 0b7b5d76..04dc8853 100644 --- a/tests/ServiceStack.Redis.Tests/RedisStatsTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisStatsTests.cs @@ -7,14 +7,14 @@ namespace ServiceStack.Redis.Tests public class RedisStatsTests : RedisClientTestsBase { - [TestFixtureSetUp] - public void TestFixtureSetUp() + [OneTimeSetUp] + public void OneTimeSetUp() { RedisConfig.AssumeServerVersion = 2821; } [Test] - [Explicit] + [Ignore("too long")] public void Batch_and_Pipeline_requests_only_counts_as_1_request() { var reqCount = RedisNativeClient.RequestsPerHour; diff --git a/tests/ServiceStack.Redis.Tests/RedisTemplateTests.cs b/tests/ServiceStack.Redis.Tests/RedisTemplateTests.cs new file mode 100644 index 00000000..03876a1d --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisTemplateTests.cs @@ -0,0 +1,47 @@ +using NUnit.Framework; +using ServiceStack.Script; +using ServiceStack.Templates; + +namespace ServiceStack.Redis.Tests +{ + class RedisTemplateTests + { + [Test] + public void Does_build_connection_string() + { + var context = new ScriptContext + { + ScriptMethods = { new RedisScripts() } + }; + context.Container.AddSingleton(() => new RedisManagerPool()); + context.Init(); + + Assert.That(context.EvaluateScript("{{ redisToConnectionString: host:7000?db=1 }}"), + Is.EqualTo("host:7000?db=1")); + + Assert.That(context.EvaluateScript("{{ { host: 'host' } | redisToConnectionString }}"), + Is.EqualTo("host:6379?db=0")); + + Assert.That(context.EvaluateScript("{{ { port: 7000 } | redisToConnectionString }}"), + Is.EqualTo("localhost:7000?db=0")); + + Assert.That(context.EvaluateScript("{{ { db: 1 } | redisToConnectionString }}"), + Is.EqualTo("localhost:6379?db=1")); + + Assert.That(context.EvaluateScript("{{ { host: 'host', port: 7000, db: 1 } | redisToConnectionString }}"), + Is.EqualTo("host:7000?db=1")); + + Assert.That(context.EvaluateScript("{{ { host: 'host', port: 7000, db: 1, password:'secret' } | redisToConnectionString | raw }}"), + Is.EqualTo("host:7000?db=1&password=secret")); + + Assert.That(context.EvaluateScript("{{ redisConnectionString }}"), + Is.EqualTo("localhost:6379?db=0")); + + Assert.That(context.EvaluateScript("{{ { db: 1 } | redisChangeConnection }}"), + Is.EqualTo("localhost:6379?db=1")); + + Assert.That(context.EvaluateScript("{{ redisConnectionString }}"), + Is.EqualTo("localhost:6379?db=1")); + } + } +} diff --git a/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.Async.cs new file mode 100644 
index 00000000..6ecd123b --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.Async.cs @@ -0,0 +1,68 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisTransactionCommonTestsAsync + : RedisClientTestsBaseAsync + { + private const string Prefix = "tran"; + + public override void OnAfterEachTest() + { + CleanMask = Prefix + "*"; + base.OnAfterEachTest(); + } + + [Test] + public async Task Can_Set_and_Expire_key_in_atomic_transaction() + { + var oneSec = TimeSpan.FromSeconds(1); + + Assert.That(await RedisAsync.GetValueAsync(Prefix + "key"), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) //Calls 'MULTI' + { + trans.QueueCommand(r => r.SetValueAsync(Prefix + "key", "a")); //Queues 'SET key a' + trans.QueueCommand(r => r.ExpireEntryInAsync(Prefix + "key", oneSec)); //Queues 'EXPIRE key 1' + + await trans.CommitAsync(); //Calls 'EXEC' + + } //Calls 'DISCARD' if 'EXEC' wasn't called + + Assert.That(await RedisAsync.GetValueAsync(Prefix + "key"), Is.EqualTo("a")); + await Task.Delay(TimeSpan.FromSeconds(2)); + Assert.That(await RedisAsync.GetValueAsync(Prefix + "key"), Is.Null); + } + + [Test] + public async Task Can_Pop_priority_message_from_SortedSet_and_Add_to_workq_in_atomic_transaction() + { + var messages = new List { "message4", "message3", "message2" }; + + await RedisAsync.AddItemToListAsync(Prefix + "workq", "message1"); + + var priority = 1; + await messages.ForEachAsync(async x => await RedisAsync.AddItemToSortedSetAsync(Prefix + "prioritymsgs", x, priority++)); + + var highestPriorityMessage = await RedisAsync.PopItemWithHighestScoreFromSortedSetAsync(Prefix + "prioritymsgs"); + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.RemoveItemFromSortedSetAsync(Prefix + "prioritymsgs", highestPriorityMessage)); + trans.QueueCommand(r => r.AddItemToListAsync(Prefix + "workq", highestPriorityMessage)); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetAllItemsFromListAsync(Prefix + "workq"), + Is.EquivalentTo(new List { "message1", "message2" })); + Assert.That(await RedisAsync.GetAllItemsFromSortedSetAsync(Prefix + "prioritymsgs"), + Is.EquivalentTo(new List { "message3", "message4" })); + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.cs b/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.cs index 20ada901..8b4ad350 100644 --- a/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisTransactionCommonTests.cs @@ -25,7 +25,7 @@ public void Can_Set_and_Expire_key_in_atomic_transaction() Assert.That(Redis.GetValue(Prefix + "key"), Is.Null); using (var trans = Redis.CreateTransaction()) //Calls 'MULTI' { - trans.QueueCommand(r => r.SetEntry(Prefix + "key", "a")); //Queues 'SET key a' + trans.QueueCommand(r => r.SetValue(Prefix + "key", "a")); //Queues 'SET key a' trans.QueueCommand(r => r.ExpireEntryIn(Prefix + "key", oneSec)); //Queues 'EXPIRE key 1' trans.Commit(); //Calls 'EXEC' diff --git a/tests/ServiceStack.Redis.Tests/RedisTransactionTests.Async.cs b/tests/ServiceStack.Redis.Tests/RedisTransactionTests.Async.cs new file mode 100644 index 00000000..3d11b6b4 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RedisTransactionTests.Async.cs @@ -0,0 +1,417 @@ +using System; +using 
System.Collections.Generic; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture] + public class RedisTransactionTestsAsync + : RedisClientTestsBaseAsync + { + private const string Key = "rdtmultitest"; + private const string ListKey = "rdtmultitest-list"; + private const string SetKey = "rdtmultitest-set"; + private const string SortedSetKey = "rdtmultitest-sortedset"; + private const string HashKey = "rdthashtest"; + + public override void OnAfterEachTest() + { + CleanMask = Key + "*"; + base.OnAfterEachTest(); + } + + [Test] + public async Task Can_call_single_operation_in_transaction() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + var map = new Dictionary(); + trans.QueueCommand(r => r.GetAsync(Key).AsValueTask(), y => map[Key] = y); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + } + + [Test] + public async Task No_commit_of_atomic_transactions_discards_all_commands() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + } + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } + + [Test] + public async Task Watch_aborts_transaction() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + const string value1 = "value1"; + try + { + await RedisAsync.WatchAsync(new[] { Key }); + await RedisAsync.SetAsync(Key, value1); + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => r.SetAsync(Key, value1).AsValueTask()); + var success = await trans.CommitAsync(); + Assert.False(success); + Assert.AreEqual(value1, await RedisAsync.GetAsync(Key)); + } + catch (NotSupportedException) + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Exception_in_atomic_transactions_discards_all_commands() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + try + { + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + throw new NotSupportedException(); + } + catch (NotSupportedException) + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + } + } + + [Test] + public async Task Can_call_single_operation_3_Times_in_transaction() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("3")); + } + + [Test] + public async Task Can_call_single_operation_with_callback_3_Times_in_transaction() + { + var results = new List(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + trans.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + trans.QueueCommand(r => r.IncrementValueAsync(Key), results.Add); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), 
Is.EqualTo("3")); + Assert.That(results, Is.EquivalentTo(new List { 1, 2, 3 })); + } + + [Test] + public async Task Supports_different_operation_types_in_same_transaction() + { + var incrementResults = new List(); + var collectionCounts = new List(); + var containsItem = false; + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem1")); + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem2")); + trans.QueueCommand(r => r.AddItemToSetAsync(SetKey, "setitem")); + trans.QueueCommand(r => r.SetContainsItemAsync(SetKey, "setitem"), b => containsItem = b); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem1")); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem2")); + trans.QueueCommand(r => r.AddItemToSortedSetAsync(SortedSetKey, "sortedsetitem3")); + trans.QueueCommand(r => r.GetListCountAsync(ListKey), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSetCountAsync(SetKey), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.GetSortedSetCountAsync(SortedSetKey), intResult => collectionCounts.Add(intResult)); + trans.QueueCommand(r => r.IncrementValueAsync(Key), intResult => incrementResults.Add(intResult)); + + await trans.CommitAsync(); + } + + Assert.That(containsItem, Is.True); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("2")); + Assert.That(incrementResults, Is.EquivalentTo(new List { 1, 2 })); + Assert.That(collectionCounts, Is.EquivalentTo(new List { 2, 1, 3 })); + Assert.That(await RedisAsync.GetAllItemsFromListAsync(ListKey), Is.EquivalentTo(new List { "listitem1", "listitem2" })); + Assert.That(await RedisAsync.GetAllItemsFromSetAsync(SetKey), Is.EquivalentTo(new List { "setitem" })); + Assert.That(await RedisAsync.GetAllItemsFromSortedSetAsync(SortedSetKey), Is.EquivalentTo(new List { "sortedsetitem1", "sortedsetitem2", "sortedsetitem3" })); + } + + [Test] + public async Task Can_call_multi_string_operations_in_transaction() + { + string item1 = null; + string item4 = null; + + var results = new List(); + Assert.That(await RedisAsync.GetListCountAsync(ListKey), Is.EqualTo(0)); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem1")); + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem2")); + trans.QueueCommand(r => r.AddItemToListAsync(ListKey, "listitem3")); + trans.QueueCommand(r => r.GetAllItemsFromListAsync(ListKey), x => results = x); + trans.QueueCommand(r => r.GetItemFromListAsync(ListKey, 0), x => item1 = x); + trans.QueueCommand(r => r.GetItemFromListAsync(ListKey, 4), x => item4 = x); + + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetListCountAsync(ListKey), Is.EqualTo(3)); + Assert.That(results, Is.EquivalentTo(new List { "listitem1", "listitem2", "listitem3" })); + Assert.That(item1, Is.EqualTo("listitem1")); + Assert.That(item4, Is.Null); + } + [Test] + public async Task Can_call_multiple_setexs_in_transaction() + { + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + var keys = new[] { "key1", "key2", "key3" }; + var values = new[] { "1", "2", "3" }; + await using var trans = await RedisAsync.CreateTransactionAsync(); + + for (int i = 0; i < 3; ++i) + { + 
int index0 = i; + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync(keys[index0], 100, GetBytes(values[index0]))); + } + + await trans.CommitAsync(); + await trans.ReplayAsync(); + + + for (int i = 0; i < 3; ++i) + Assert.AreEqual(await RedisAsync.GetValueAsync(keys[i]), values[i]); + } + [Test] + // Operations that are not supported in older versions will look at server info to determine what to do. + // If server info is fetched each time, then it will interfere with transaction + public async Task Can_call_operation_not_supported_on_older_servers_in_transaction() + { + var temp = new byte[1]; + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => ((IRedisNativeClientAsync)r).SetExAsync(Key, 5, temp)); + await trans.CommitAsync(); + } + + + [Test] + public async Task Transaction_can_be_replayed() + { + string KeySquared = Key + Key; + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + await using var trans = await RedisAsync.CreateTransactionAsync(); + trans.QueueCommand(r => r.IncrementValueAsync(Key)); + trans.QueueCommand(r => r.IncrementValueAsync(KeySquared)); + await trans.CommitAsync(); + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + await NativeAsync.DelAsync(Key); + await NativeAsync.DelAsync(KeySquared); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + + await trans.ReplayAsync(); + await trans.DisposeAsync(); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("1")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.EqualTo("1")); + } + + [Test] + public async Task Transaction_can_issue_watch() + { + await NativeAsync.DelAsync(Key); + Assert.That(await RedisAsync.GetValueAsync(Key), Is.Null); + + string KeySquared = Key + Key; + await NativeAsync.DelAsync(KeySquared); + + await RedisAsync.WatchAsync(new[] { Key, KeySquared }); + await RedisAsync.SetAsync(Key, 7); + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.SetAsync(Key, 1).AsValueTask()); + trans.QueueCommand(r => r.SetAsync(KeySquared, 2).AsValueTask()); + await trans.CommitAsync(); + } + + Assert.That(await RedisAsync.GetValueAsync(Key), Is.EqualTo("7")); + Assert.That(await RedisAsync.GetValueAsync(KeySquared), Is.Null); + } + + [Test] + public async Task Can_set_Expiry_on_key_in_transaction() + { + var expiresIn = TimeSpan.FromMinutes(15); + + const string key = "No TTL-Transaction"; + var keyWithTtl = "{0}s TTL-Transaction".Fmt(expiresIn.TotalSeconds); + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddAsync(key, "Foo").AsValueTask()); + trans.QueueCommand(r => r.AddAsync(keyWithTtl, "Bar", expiresIn).AsValueTask()); + + if (!await trans.CommitAsync()) + throw new Exception("Transaction Failed"); + } + + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo("Foo")); + Assert.That(await RedisAsync.GetAsync(keyWithTtl), Is.EqualTo("Bar")); + + Assert.That(await RedisAsync.GetTimeToLiveAsync(key), Is.EqualTo(TimeSpan.MaxValue)); + Assert.That((await RedisAsync.GetTimeToLiveAsync(keyWithTtl)).Value.TotalSeconds, Is.GreaterThan(1)); + } + + [Test] + public async Task Does_not_set_Expiry_on_existing_key_in_transaction() + { + var expiresIn = TimeSpan.FromMinutes(15); + + var key = 
"Exting TTL-Transaction"; + await RedisAsync.AddAsync(key, "Foo"); + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.AddAsync(key, "Bar", expiresIn).AsValueTask()); + + if (!await trans.CommitAsync()) + throw new Exception("Transaction Failed"); + } + + Assert.That(await RedisAsync.GetAsync(key), Is.EqualTo("Foo")); + Assert.That(await RedisAsync.GetTimeToLiveAsync(key), Is.EqualTo(TimeSpan.MaxValue)); + } + + [Test] + public async Task Can_call_GetAllEntriesFromHash_in_transaction() + { + var stringMap = new Dictionary { + {"one","a"}, {"two","b"}, {"three","c"}, {"four","d"} + }; + foreach (var x in stringMap) + { + await RedisAsync.SetEntryInHashAsync(HashKey, x.Key, x.Value); + } + + Dictionary results = null; + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.GetAllEntriesFromHashAsync(HashKey), x => results = x); + + await trans.CommitAsync(); + } + + Assert.That(results, Is.EquivalentTo(stringMap)); + } + + [Test] + public async Task Can_call_Type_in_transaction() + { + await RedisAsync.SetValueAsync("string", "STRING"); + await RedisAsync.AddItemToListAsync("list", "LIST"); + await RedisAsync.AddItemToSetAsync("set", "SET"); + await RedisAsync.AddItemToSortedSetAsync("zset", "ZSET", 1); + + var keys = new[] { "string", "list", "set", "zset" }; + + var results = new Dictionary(); + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + foreach (var key in keys) + { + trans.QueueCommand(r => r.TypeAsync(key), x => results[key] = x); + } + + await trans.CommitAsync(); + } + + results.PrintDump(); + + Assert.That(results, Is.EquivalentTo(new Dictionary + { + {"string", "string" }, + {"list", "list" }, + {"set", "set" }, + {"zset", "zset" }, + })); + } + + [Test] + public async Task Can_call_HashSet_commands_in_transaction() + { + await RedisAsync.AddItemToSetAsync("set", "ITEM 1"); + await RedisAsync.AddItemToSetAsync("set", "ITEM 2"); + HashSet result = null; + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.GetAllItemsFromSetAsync("set"), values => result = values); + + await trans.CommitAsync(); + } + + Assert.That(result, Is.EquivalentTo(new[] { "ITEM 1", "ITEM 2" })); + } + + [Test] + public async Task Can_call_LUA_Script_in_transaction() + { + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.ExecLuaAsync("return {'myval', 'myotherval'}", new string[0])); + + await trans.CommitAsync(); + } + + RedisText result = null; + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(r => r.ExecLuaAsync("return {'myval', 'myotherval'}", new string[0]), s => result = s); + + await trans.CommitAsync(); + } + + Assert.That(result.Children[0].Text, Is.EqualTo("myval")); + Assert.That(result.Children[1].Text, Is.EqualTo("myotherval")); + } + + [Test] + public async Task Can_call_SetValueIfNotExists_in_transaction() + { + bool f = false; + bool s = false; + + await using (var trans = await RedisAsync.CreateTransactionAsync()) + { + trans.QueueCommand(c => c.SetValueIfNotExistsAsync("foo", "blah"), r => f = r); + trans.QueueCommand(c => c.SetValueIfNotExistsAsync("bar", "blah"), r => s = r); + await trans.CommitAsync(); + } + + Assert.That(f); + Assert.That(s); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RedisTransactionTests.cs b/tests/ServiceStack.Redis.Tests/RedisTransactionTests.cs index 
f63cc592..b3460118 100644 --- a/tests/ServiceStack.Redis.Tests/RedisTransactionTests.cs +++ b/tests/ServiceStack.Redis.Tests/RedisTransactionTests.cs @@ -53,7 +53,6 @@ public void Watch_aborts_transaction() { Assert.That(Redis.GetValue(Key), Is.Null); const string value1 = "value1"; - const string value2 = "value2"; try { Redis.Watch(Key); @@ -66,7 +65,7 @@ public void Watch_aborts_transaction() Assert.AreEqual(value1, Redis.Get(Key)); } } - catch (NotSupportedException ignore) + catch (NotSupportedException) { Assert.That(Redis.GetValue(Key), Is.Null); } @@ -84,7 +83,7 @@ public void Exception_in_atomic_transactions_discards_all_commands() throw new NotSupportedException(); } } - catch (NotSupportedException ignore) + catch (NotSupportedException) { Assert.That(Redis.GetValue(Key), Is.Null); } @@ -403,5 +402,22 @@ public void Can_call_LUA_Script_in_transaction() Assert.That(result.Children[0].Text, Is.EqualTo("myval")); Assert.That(result.Children[1].Text, Is.EqualTo("myotherval")); } + + [Test] + public void Can_call_SetValueIfNotExists_in_transaction() + { + bool f = false; + bool s = false; + + using (var trans = Redis.CreateTransaction()) + { + trans.QueueCommand(c => c.SetValueIfNotExists("foo", "blah"), r => f = r); + trans.QueueCommand(c => c.SetValueIfNotExists("bar", "blah"), r => s = r); + trans.Commit(); + } + + Assert.That(f); + Assert.That(s); + } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RetryCommandTests.Async.cs b/tests/ServiceStack.Redis.Tests/RetryCommandTests.Async.cs new file mode 100644 index 00000000..6e2ed462 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/RetryCommandTests.Async.cs @@ -0,0 +1,143 @@ +using NUnit.Framework; +using System; +using System.Linq; +using System.Net.Sockets; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Async")] + public class RetryCommandTestsAsync + { + [Test, Ignore("3 vs 2 needs investigation; does same in non-async")] + public async Task Does_retry_failed_commands() + { + // warning: this test looks brittle; is often failing "Expected: 3 But was: 2" (on main branch); + + // LogManager.LogFactory = new ConsoleLogFactory(debugEnabled: true); + // RedisConfig.EnableVerboseLogging = true; + RedisStats.Reset(); + + var redisCtrl = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly(); + await redisCtrl.FlushAllAsync(); + await redisCtrl.SetClientAsync("redisCtrl"); + + var redis = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly(); + await redis.SetClientAsync("redisRetry"); + + var clientInfo = await redisCtrl.GetClientsInfoAsync(); + var redisId = clientInfo.First(m => m["name"] == "redisRetry")["id"]; + Assert.That(redisId.Length, Is.GreaterThan(0)); + + Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(1)); + + ((RedisClient)redis).OnBeforeFlush = () => + { + ((IRedisClient)redisCtrl).KillClients(withId: redisId); + }; + + Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(2)); + Assert.That(await redis.GetAsync("retryCounter"), Is.EqualTo(3)); + + Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(0)); + } + + [Test] + public async Task Does_retry_failed_commands_with_SocketException() + { + RedisStats.Reset(); + + var redis = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly(); + await redis.FlushAllAsync(); + + Assert.That(await redis.IncrementValueAsync("retryCounter"), 
Is.EqualTo(1)); + + ((RedisClient)redis).OnBeforeFlush = () => + { + ((RedisClient)redis).OnBeforeFlush = null; + throw new SocketException(); + }; + + Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(2)); + Assert.That(await redis.GetAsync("retryCounter"), Is.EqualTo(3)); + + Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(1)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(0)); + } + + [Test] + public async Task Does_Timeout_with_repeated_SocketException() + { + RedisConfig.Reset(); + RedisConfig.DefaultRetryTimeout = 100; + + var redis = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly(); + await redis.FlushAllAsync(); + + Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(1)); + + ((RedisClient)redis).OnBeforeFlush = () => + { + throw new SocketException(); + }; + + try + { + await redis.IncrementValueAsync("retryCounter"); + Assert.Fail("Should throw"); + } + catch (RedisException ex) + { + Assert.That(ex.Message, Does.StartWith("Exceeded timeout")); + + ((RedisClient)redis).OnBeforeFlush = null; + Assert.That(await redis.GetAsync("retryCounter"), Is.EqualTo(1)); + + Assert.That(RedisStats.TotalRetryCount, Is.GreaterThan(1)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(0)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(1)); + } + + RedisConfig.Reset(); + } + + [Test] + public async Task Does_not_retry_when_RetryTimeout_is_Zero() + { + RedisConfig.Reset(); + RedisConfig.DefaultRetryTimeout = 0; + + var redis = new RedisClient(RedisConfig.DefaultHost).ForAsyncOnly(); + await redis.FlushAllAsync(); + + Assert.That(await redis.IncrementValueAsync("retryCounter"), Is.EqualTo(1)); + + ((RedisClient)redis).OnBeforeFlush = () => + { + throw new SocketException(); + }; + + try + { + await redis.IncrementValueAsync("retryCounter"); + Assert.Fail("Should throw"); + } + catch (Exception ex) + { + Assert.That(ex.Message, Does.StartWith("Exceeded timeout")); + + ((RedisClient)redis).OnBeforeFlush = null; + Assert.That(await redis.GetAsync("retryCounter"), Is.EqualTo(1)); + + Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(0)); + Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(0)); + Assert.That(RedisStats.TotalRetryTimedout, Is.EqualTo(1)); + } + + RedisConfig.Reset(); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/RetryCommandTests.cs b/tests/ServiceStack.Redis.Tests/RetryCommandTests.cs index 4b67896e..e81575d4 100644 --- a/tests/ServiceStack.Redis.Tests/RetryCommandTests.cs +++ b/tests/ServiceStack.Redis.Tests/RetryCommandTests.cs @@ -14,8 +14,10 @@ public class RetryCommandTests [Test] public void Does_retry_failed_commands() { - //LogManager.LogFactory = new ConsoleLogFactory(debugEnabled: true); + // warning: this test looks brittle; is often failing "Expected: 3 But was: 2" (on main branch); + // LogManager.LogFactory = new ConsoleLogFactory(debugEnabled: true); + // RedisConfig.EnableVerboseLogging = true; RedisStats.Reset(); var redisCtrl = new RedisClient(RedisConfig.DefaultHost); @@ -37,7 +39,7 @@ public void Does_retry_failed_commands() }; Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(2)); - Assert.That(redis.Get("retryCounter"), Is.EqualTo(2)); + Assert.That(redis.Get("retryCounter"), Is.EqualTo(3)); Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(1)); Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(1)); @@ -61,7 +63,7 @@ public void 
Does_retry_failed_commands_with_SocketException() }; Assert.That(redis.IncrementValue("retryCounter"), Is.EqualTo(2)); - Assert.That(redis.Get("retryCounter"), Is.EqualTo(2)); + Assert.That(redis.Get("retryCounter"), Is.EqualTo(3)); Assert.That(RedisStats.TotalRetryCount, Is.EqualTo(1)); Assert.That(RedisStats.TotalRetrySuccess, Is.EqualTo(1)); @@ -91,7 +93,7 @@ public void Does_Timeout_with_repeated_SocketException() } catch (RedisException ex) { - Assert.That(ex.Message, Is.StringStarting("Exceeded timeout")); + Assert.That(ex.Message, Does.StartWith("Exceeded timeout")); redis.OnBeforeFlush = null; Assert.That(redis.Get("retryCounter"), Is.EqualTo(1)); @@ -127,7 +129,7 @@ public void Does_not_retry_when_RetryTimeout_is_Zero() } catch (Exception ex) { - Assert.That(ex.Message, Is.StringStarting("Exceeded timeout")); + Assert.That(ex.Message, Does.StartWith("Exceeded timeout")); redis.OnBeforeFlush = null; Assert.That(redis.Get("retryCounter"), Is.EqualTo(1)); diff --git a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis.Tests.csproj b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis.Tests.csproj index cdc61cf1..374a13f9 100644 --- a/tests/ServiceStack.Redis.Tests/ServiceStack.Redis.Tests.csproj +++ b/tests/ServiceStack.Redis.Tests/ServiceStack.Redis.Tests.csproj @@ -1,350 +1,54 @@ - - + - Debug - AnyCPU - 9.0.30729 - 2.0 - {951D28EE-5D22-4C62-AC0F-1661A8CEEC5A} - Library - Properties - ServiceStack.Redis.Tests + + net472;net6.0 + portable ServiceStack.Redis.Tests - v4.5 - 512 - - - 3.5 - - publish\ - true - Disk - false - Foreground - 7 - Days - false - false - true - 0 - 1.0.0.%2a - false - false - true - - ..\..\src\ - true - - - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - AllRules.ruleset - false - - - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - AllRules.ruleset - x86 - false - - - true - bin\STATIC_ONLY NO_EXPRESSIONS\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset - false - - - true - bin\MonoTouch\ - DEBUG;TRACE - full - AnyCPU - prompt - 4 - false - AllRules.ruleset - false - - - true - bin\x86\Debug\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - true - true - false - - - bin\x86\Release\ - TRACE - true - pdbonly - x86 - prompt - AllRules.ruleset - true - true - false - - - true - bin\x86\STATIC_ONLY NO_EXPRESSIONS\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - true - false - false - - - true - bin\x86\MonoTouch\ - DEBUG;TRACE - full - x86 - prompt - AllRules.ruleset - false - false - false - - - bin\Signed\ - TRACE - true - pdbonly - x86 - prompt - AllRules.ruleset - false - - - bin\x86\Signed\ - TRACE - true - pdbonly - x86 - prompt - AllRules.ruleset - false + Library + ServiceStack.Redis.Tests + false + false + false + false + false + false + false + false - - False - ..\..\lib\tests\Moq.dll - - - False - ..\..\src\packages\NUnit.2.6.3\lib\nunit.framework.dll - - - ..\..\lib\tests\ServiceStack.dll - - - ..\..\lib\ServiceStack.Common.dll - - - ..\..\lib\ServiceStack.Interfaces.dll - - - ..\..\lib\ServiceStack.Text.dll - - - ..\..\src\packages\Sider.0.9.3\lib\net40-Client\Sider.dll - True - - - - 3.5 - - - - - 3.5 - - - 3.5 - - - + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Code - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + $(DefineConstants);NET45 + + + + - - - False - .NET Framework 3.5 SP1 Client Profile - false - - - False - .NET 
Framework 3.5 SP1 - true - - - False - Windows Installer 3.1 - true - + + $(DefineConstants);NETCORE + + + + + + + + + + - - - {AF99F19B-4C04-4F58-81EF-B092F1FCC540} - ServiceStack.Redis - + - - - \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypes.cs b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypes.cs index 49eb083a..74f4992d 100644 --- a/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypes.cs +++ b/tests/ServiceStack.Redis.Tests/Shared/ModelWithFieldsOfDifferentTypes.cs @@ -160,29 +160,22 @@ public override int GetHashCode() public static void AssertIsEqual(ModelWithFieldsOfDifferentTypes actual, ModelWithFieldsOfDifferentTypes expected) { - Assert.That(actual.Id, Is.EqualTo(expected.Id)); - Assert.That(actual.Name, Is.EqualTo(expected.Name)); - Assert.That(actual.Guid, Is.EqualTo(expected.Guid)); - Assert.That(actual.LongId, Is.EqualTo(expected.LongId)); - Assert.That(actual.Bool, Is.EqualTo(expected.Bool)); - try - { - Assert.That(actual.DateTime, Is.EqualTo(expected.DateTime)); - } - catch (Exception ex) - { - Log.Error("Trouble with DateTime precisions, trying Assert again with rounding to seconds", ex); - Assert.That(actual.DateTime.RoundToSecond(), Is.EqualTo(expected.DateTime.RoundToSecond())); - } - try - { - Assert.That(actual.Double, Is.EqualTo(expected.Double)); - } - catch (Exception ex) - { - Log.Error("Trouble with double precisions, trying Assert again with rounding to 10 decimals", ex); - Assert.That(Math.Round(actual.Double, 10), Is.EqualTo(Math.Round(actual.Double, 10))); - } + if (actual.Id != expected.Id) + throw new Exception($"{actual.Id} != {expected.Id}"); + if (actual.Name != expected.Name) + throw new Exception($"{actual.Name} != {expected.Name}"); + if (actual.Guid != expected.Guid) + throw new Exception($"{actual.Guid} != {expected.Guid}"); + if (actual.LongId != expected.LongId) + throw new Exception($"{actual.LongId} != {expected.LongId}"); + if (actual.Bool != expected.Bool) + throw new Exception($"{actual.Bool} != {expected.Bool}"); + + if (actual.DateTime.RoundToSecond() != expected.DateTime.RoundToSecond()) + throw new Exception($"{actual.DateTime.RoundToSecond()} != {expected.DateTime.RoundToSecond()}"); + + if (Math.Abs(actual.Double - expected.Double) > 1) + throw new Exception($"{actual.Double} != {expected.Double}"); } } } \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ShippersExample.Async.cs b/tests/ServiceStack.Redis.Tests/ShippersExample.Async.cs new file mode 100644 index 00000000..c0154116 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/ShippersExample.Async.cs @@ -0,0 +1,126 @@ +// +// ServiceStack.Redis: ECMA CLI Binding to the Redis key-value storage system +// +// Authors: +// Demis Bellot (demis.bellot@gmail.com) +// +// Copyright 2013 Service Stack LLC. All Rights Reserved. +// +// Licensed under the same terms of reddis and ServiceStack: new BSD license. 
+// + +using System; +using System.Diagnostics; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; +using ServiceStack.Common.Tests.Models; +using ServiceStack.Redis.Generic; +using ServiceStack.Text; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Async")] + public class ShippersExampleAsync + { + + public class Shipper + { + public long Id { get; set; } + public string CompanyName { get; set; } + public DateTime DateCreated { get; set; } + public ShipperType ShipperType { get; set; } + public Guid UniqueRef { get; set; } + } + + static void Dump<T>(string message, T entity) + { + var text = TypeSerializer.SerializeToString(entity); + + //make it a little easier on the eyes + var prettyLines = text.Split(new[] { "[", "},{", "]" }, + StringSplitOptions.RemoveEmptyEntries) + .ToList().ConvertAll(x => x.Replace("{", "").Replace("}", "")); + + Debug.WriteLine("\n" + message); + foreach (var l in prettyLines) Debug.WriteLine(l); + } + + [Test] + public async Task Shippers_UseCase() + { + await using var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + //Create a 'strongly-typed' API that makes all Redis Value operations to apply against Shippers + IRedisTypedClientAsync<Shipper> redis = redisClient.As<Shipper>(); + + //Redis lists implement IList<T> while Redis sets implement ICollection<T> + var currentShippers = redis.Lists["urn:shippers:current"]; + var prospectiveShippers = redis.Lists["urn:shippers:prospective"]; + + await currentShippers.AddAsync( + new Shipper + { + Id = await redis.GetNextSequenceAsync(), + CompanyName = "Trains R Us", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.Trains, + UniqueRef = Guid.NewGuid() + }); + + await currentShippers.AddAsync( + new Shipper + { + Id = await redis.GetNextSequenceAsync(), + CompanyName = "Planes R Us", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.Planes, + UniqueRef = Guid.NewGuid() + }); + + var lameShipper = new Shipper + { + Id = await redis.GetNextSequenceAsync(), + CompanyName = "We do everything!", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.All, + UniqueRef = Guid.NewGuid() + }; + + await currentShippers.AddAsync(lameShipper); + + Dump("ADDED 3 SHIPPERS:", await currentShippers.ToListAsync()); + + await currentShippers.RemoveAsync(lameShipper); + + Dump("REMOVED 1:", await currentShippers.ToListAsync()); + + await prospectiveShippers.AddAsync( + new Shipper + { + Id = await redis.GetNextSequenceAsync(), + CompanyName = "Trucks R Us", + DateCreated = DateTime.UtcNow, + ShipperType = ShipperType.Automobiles, + UniqueRef = Guid.NewGuid() + }); + + Dump("ADDED A PROSPECTIVE SHIPPER:", await prospectiveShippers.ToListAsync()); + + await redis.PopAndPushItemBetweenListsAsync(prospectiveShippers, currentShippers); + + Dump("CURRENT SHIPPERS AFTER POP n' PUSH:", await currentShippers.ToListAsync()); + Dump("PROSPECTIVE SHIPPERS AFTER POP n' PUSH:", await prospectiveShippers.ToListAsync()); + + var poppedShipper = await redis.PopItemFromListAsync(currentShippers); + Dump("POPPED a SHIPPER:", poppedShipper); + Dump("CURRENT SHIPPERS AFTER POP:", await currentShippers.ToListAsync()); + + //reset sequence and delete all lists + await redis.SetSequenceAsync(0); + await redis.RemoveEntryAsync(new[] { currentShippers, prospectiveShippers }); + Dump("DELETING CURRENT AND PROSPECTIVE SHIPPERS:", await currentShippers.ToListAsync()); + + } + + } +} diff --git a/tests/ServiceStack.Redis.Tests/SslTests.cs b/tests/ServiceStack.Redis.Tests/SslTests.cs index 
5a570305..ae082a06 100644 --- a/tests/ServiceStack.Redis.Tests/SslTests.cs +++ b/tests/ServiceStack.Redis.Tests/SslTests.cs @@ -23,16 +23,14 @@ public class SslTests private string Password; private string connectionString; - [TestFixtureSetUp] - public void TestFixtureSetUp() + [OneTimeSetUp] + public void OneTimeSetUp() { -#if !NETCORE_SUPPORT var settings = new TextFileSettings("~/azureconfig.txt".MapProjectPath()); Host = settings.GetString("Host"); Port = settings.Get("Port", 6379); Password = settings.GetString("Password"); connectionString = "{0}@{1}".Fmt(Password, Host); -#endif } [Test] @@ -69,6 +67,18 @@ public void Can_connect_to_ssl_azure_redis_with_UrlFormat() } } + [Test] + public void Can_connect_to_ssl_azure_redis_with_UrlFormat_Custom_SSL_Protocol () + { + var url = "redis://{0}?ssl=true&sslprotocols=Tls12&password={1}".Fmt(Host, Password.UrlEncode()); + using (var client = new RedisClient(url)) + { + client.Set("foo", "bar"); + var foo = client.GetValue("foo"); + foo.Print(); + } + } + [Test] public void Can_connect_to_ssl_azure_redis_with_PooledClientsManager() { @@ -95,11 +105,7 @@ public void Can_connect_to_NetworkStream() if (!socket.Connected) { -#if NETCORE - socket.Dispose(); -#else socket.Close(); -#endif throw new Exception("Could not connect"); } @@ -121,11 +127,7 @@ public void Can_connect_to_Buffered_SslStream() if (!socket.Connected) { -#if NETCORE - socket.Dispose(); -#else socket.Close(); -#endif throw new Exception("Could not connect"); } @@ -143,13 +145,8 @@ public void Can_connect_to_Buffered_SslStream() } else { -#if NETCORE - var ctor = typeof(SslStream).GetTypeInfo().GetConstructors() - .First(x => x.GetParameters().Length == 5); -#else var ctor = typeof(SslStream).GetConstructors() .First(x => x.GetParameters().Length == 5); -#endif var policyType = AssemblyUtils.FindType("System.Net.Security.EncryptionPolicy"); var policyValue = Enum.Parse(policyType, "RequireEncryption"); @@ -178,7 +175,7 @@ public void Can_connect_to_Buffered_SslStream() if (!sslStream.IsEncrypted) throw new Exception("Could not establish an encrypted connection to " + Host); - var bstream = new BufferedStream(sslStream, 16 * 1024); + var bstream = new System.IO.BufferedStream(sslStream, 16 * 1024); SendAuth(bstream); } @@ -295,7 +292,7 @@ public void SSL_can_support_64_threads_using_the_client_simultaneously() const int noOfConcurrentClients = 64; //WaitHandle.WaitAll limit is <= 64 var clientAsyncResults = new List(); - using (var manager = new PooledRedisClientManager(TestConfig.MasterHosts, TestConfig.SlaveHosts)) + using (var manager = new PooledRedisClientManager(TestConfig.MasterHosts, TestConfig.ReplicaHosts)) { manager.GetClient().Run(x => x.FlushAll()); @@ -331,7 +328,7 @@ private static void UseClient(IRedisClient client, int clientNo, string testData Log("Client '{0}' is using '{1}'", clientNo, client.Host); var testClientKey = "test:" + host + ":" + clientNo; - client.SetEntry(testClientKey, testData); + client.SetValue(testClientKey, testData); var result = client.GetValue(testClientKey) ?? 
""; Log("\t{0} => {1} len {2} {3} len", testClientKey, diff --git a/tests/ServiceStack.Redis.Tests/Support/CustomType.cs b/tests/ServiceStack.Redis.Tests/Support/CustomType.cs index 9582d3c7..1620e8fd 100644 --- a/tests/ServiceStack.Redis.Tests/Support/CustomType.cs +++ b/tests/ServiceStack.Redis.Tests/Support/CustomType.cs @@ -1,5 +1,8 @@ +using ServiceStack.Text; + namespace ServiceStack.Redis.Tests.Support { + [RuntimeSerializable] public class CustomType { public long CustomId { get; set; } diff --git a/tests/ServiceStack.Redis.Tests/TestConfig.cs b/tests/ServiceStack.Redis.Tests/TestConfig.cs index 238a223a..744091c8 100644 --- a/tests/ServiceStack.Redis.Tests/TestConfig.cs +++ b/tests/ServiceStack.Redis.Tests/TestConfig.cs @@ -11,14 +11,14 @@ static TestConfig() LogManager.LogFactory = new InMemoryLogFactory(); } - public const bool IgnoreLongTests = true; + public static bool IgnoreLongTests = true; + + public static string SingleHost => Environment.GetEnvironmentVariable("CI_REDIS") ?? "localhost"; + + public static string GeoHost => Environment.GetEnvironmentVariable("CI_REDIS") ?? "10.0.0.121"; - public static string SingleHost - { - get { return Environment.GetEnvironmentVariable("CI_REDIS") ?? "localhost"; } - } public static readonly string[] MasterHosts = new[] { "localhost" }; - public static readonly string[] SlaveHosts = new[] { "localhost" }; + public static readonly string[] ReplicaHosts = new[] { "localhost" }; public const int RedisPort = 6379; diff --git a/tests/ServiceStack.Redis.Tests/TrackThreadTests.cs b/tests/ServiceStack.Redis.Tests/TrackThreadTests.cs new file mode 100644 index 00000000..88ed49aa --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/TrackThreadTests.cs @@ -0,0 +1,98 @@ +using System; +using System.Threading; +using NUnit.Framework; + +namespace ServiceStack.Redis.Tests +{ + public class TrackThreadTests + { + [Test] + public void Does_throw_when_using_same_client_on_different_threads() + { + RedisConfig.AssertAccessOnlyOnSameThread = true; + InvalidAccessException poolEx = null; + + var redisManager = new RedisManagerPool(); + + using (var redis = redisManager.GetClient()) + { + var threadId = Thread.CurrentThread.ManagedThreadId.ToString(); + var key = $"Thread#{threadId}"; + redis.SetValue(key, threadId); + + ThreadPool.QueueUserWorkItem(_ => + { + using (var poolRedis = redisManager.GetClient()) + { + var poolThreadId = Thread.CurrentThread.ManagedThreadId.ToString(); + var poolKey = $"Thread#{poolThreadId}"; + poolRedis.SetValue(poolKey , poolThreadId); + + Console.WriteLine("From Pool: " + poolRedis.GetValue(poolKey)); + + try + { + Console.WriteLine("From Pool (using TEST): " + redis.GetValue(poolKey)); + } + catch (InvalidAccessException ex) + { + poolEx = ex; + } + } + }); + + Thread.Sleep(100); + + Console.WriteLine("From Test: " + redis.GetValue(key)); + + if (poolEx == null) + throw new Exception("Should throw InvalidAccessException"); + + Console.WriteLine("InvalidAccessException: " + poolEx.Message); + } + + RedisConfig.AssertAccessOnlyOnSameThread = false; + } + + [Test] + public void Does_not_throw_when_using_different_clients_on_same_Thread() + { + RedisConfig.AssertAccessOnlyOnSameThread = true; + InvalidAccessException poolEx = null; + + var redisManager = new RedisManagerPool(); + + using (var redis = redisManager.GetClient()) + { + var threadId = Thread.CurrentThread.ManagedThreadId.ToString(); + var key = $"Thread#{threadId}"; + redis.SetValue(key, threadId); + + ThreadPool.QueueUserWorkItem(_ => + { + try + { + using (var 
poolRedis = redisManager.GetClient()) + { + var poolThreadId = Thread.CurrentThread.ManagedThreadId.ToString(); + var poolKey = $"Thread#{poolThreadId}"; + poolRedis.SetValue(poolKey , poolThreadId); + + Console.WriteLine("From Pool: " + poolRedis.GetValue(poolKey )); + } + } + catch (InvalidAccessException ex) + { + poolEx = ex; + } + }); + + Thread.Sleep(100); + + Console.WriteLine("From Test: " + redis.GetValue(key)); + } + + RedisConfig.AssertAccessOnlyOnSameThread = false; + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/TwemproxyTests.cs b/tests/ServiceStack.Redis.Tests/TwemproxyTests.cs index e03d95bd..a69f6e9d 100644 --- a/tests/ServiceStack.Redis.Tests/TwemproxyTests.cs +++ b/tests/ServiceStack.Redis.Tests/TwemproxyTests.cs @@ -3,7 +3,7 @@ namespace ServiceStack.Redis.Tests { - [TestFixture, Explicit] + [TestFixture, Ignore("Integration")] public class TwemproxyTests { [Test] @@ -14,8 +14,8 @@ public void Can_connect_to_twemproxy() //ServerVersionNumber = 2611 }; //var redis = new RedisClient("10.0.0.14"); - redis.SetEntry("foo", "bar"); - var foo = redis.GetEntry("foo"); + redis.SetValue("foo", "bar"); + var foo = redis.GetValue("foo"); Assert.That(foo, Is.EqualTo("bar")); } diff --git a/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.Async.cs b/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.Async.cs new file mode 100644 index 00000000..f6736364 --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.Async.cs @@ -0,0 +1,436 @@ +using NUnit.Framework; +using ServiceStack.Caching; +using ServiceStack.Logging; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration"), Category("Async")] + public class UserSessionTestsAsync + { + static UserSessionTestsAsync() + { + LogManager.LogFactory = new ConsoleLogFactory(); + } + + //MasterUser master; + + static readonly Guid UserClientGlobalId1 = new Guid("71A30DE3-D7AF-4B8E-BCA2-AB646EE1F3E9"); + static readonly Guid UserClientGlobalId2 = new Guid("A8D300CF-0414-4C99-A495-A7F34C93CDE1"); + static readonly string UserClientKey = new Guid("10B7D0F7-4D4E-4676-AAC7-CF0234E9133E").ToString("N"); + static readonly Guid UserId = new Guid("5697B030-A369-43A2-A842-27303A0A62BC"); + private const string UserName = "User1"; + private const string ShardId = "0"; + + readonly UserClientSession session = new UserClientSession( + Guid.NewGuid(), UserId, "192.168.0.1", UserClientKey, UserClientGlobalId1); + + private RedisClient redisCache; + + [SetUp] + public void OnBeforeEachTest() + { + redisCache = new RedisClient(TestConfig.SingleHost); + redisCache.FlushAll(); + //master = UserMasterDataAccessModel.Instance.MasterUsers.NewDataAccessObject(true); + } + + public CachedUserSessionManagerAsync GetCacheManager(ICacheClientAsync cacheClient) + { + return new CachedUserSessionManagerAsync(cacheClient); + } + + private static void AssertClientSessionsAreEqual( + UserClientSession clientSession, UserClientSession resolvedClientSession) + { + Assert.That(resolvedClientSession.Id, Is.EqualTo(clientSession.Id)); + Assert.That(resolvedClientSession.Base64ClientModulus, Is.EqualTo(clientSession.Base64ClientModulus)); + Assert.That(resolvedClientSession.IPAddress, Is.EqualTo(clientSession.IPAddress)); + Assert.That(resolvedClientSession.UserClientGlobalId, Is.EqualTo(clientSession.UserClientGlobalId)); + 
Assert.That(resolvedClientSession.UserId, Is.EqualTo(clientSession.UserId)); + } + + [Test] + public async Task Can_add_single_UserSession() + { + var cacheManager = GetCacheManager(redisCache); + + var clientSession = await cacheManager.StoreClientSessionAsync( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId1); + + var resolvedClientSession = await cacheManager.GetUserClientSessionAsync( + clientSession.UserId, clientSession.Id); + + AssertClientSessionsAreEqual(clientSession, resolvedClientSession); + } + + [Test] + public async Task Can_add_multiple_UserClientSessions() + { + var cacheManager = GetCacheManager(redisCache); + + var clientSession1 = await cacheManager.StoreClientSessionAsync( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId1); + + var clientSession2 = await cacheManager.StoreClientSessionAsync( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId2); + + var resolvedClientSession1 = await cacheManager.GetUserClientSessionAsync( + clientSession1.UserId, clientSession1.Id); + + var resolvedClientSession2 = await cacheManager.GetUserClientSessionAsync( + clientSession2.UserId, clientSession2.Id); + + AssertClientSessionsAreEqual(clientSession1, resolvedClientSession1); + AssertClientSessionsAreEqual(clientSession2, resolvedClientSession2); + } + + [Test] + public async Task Does_remove_UserClientSession() + { + var cacheManager = GetCacheManager(redisCache); + + var clientSession1 = await cacheManager.StoreClientSessionAsync( + UserId, + UserName, + ShardId, + session.IPAddress, + UserClientKey, + UserClientGlobalId1); + + var userSession = await cacheManager.GetUserSessionAsync(UserId); + var resolvedClientSession1 = userSession.GetClientSession(clientSession1.Id); + AssertClientSessionsAreEqual(resolvedClientSession1, clientSession1); + + resolvedClientSession1.ExpiryDate = DateTime.UtcNow.AddSeconds(-1); + await cacheManager.UpdateUserSessionAsync(userSession); + + userSession = await cacheManager.GetUserSessionAsync(UserId); + Assert.That(userSession, Is.Null); + } + + } + + public class CachedUserSessionManagerAsync + { + private static readonly ILog Log = LogManager.GetLogger(typeof(CachedUserSessionManager)); + + /// + /// Google/Yahoo seems to make you to login every 2 weeks?? + /// + private readonly ICacheClientAsync cacheClient; + + /// + /// Big perf hit if we Log on every session change + /// + /// The FMT. + /// The args. + [Conditional("DEBUG")] + protected void LogIfDebug(string fmt, params object[] args) + { + if (args.Length > 0) + Log.DebugFormat(fmt, args); + else + Log.Debug(fmt); + } + + public CachedUserSessionManagerAsync(ICacheClientAsync cacheClient) + { + this.cacheClient = cacheClient; + } + + /// + /// Removes the client session. + /// + /// The user global id. + /// The client session ids. + public async ValueTask RemoveClientSession(Guid userId, ICollection clientSessionIds) + { + var userSession = await this.GetUserSessionAsync(userId); + if (userSession == null) return; + + foreach (var clientSessionId in clientSessionIds) + { + userSession.RemoveClientSession(clientSessionId); + } + await this.UpdateUserSessionAsync(userSession); + } + + /// + /// Adds a new client session. + /// Should this be changed to GetOrCreateClientSession? + /// + /// The user global id. + /// Title of the user. + /// + /// The ip address. + /// The base64 client modulus. + /// The user client global id. 
+ /// + public async ValueTask StoreClientSessionAsync(Guid userId, string userName, string shardId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + var userSession = await this.GetOrCreateSessionAsync(userId, userName, shardId); + + var existingClientSession = userSession.GetClientSessionWithClientId(userClientGlobalId); + if (existingClientSession != null) + { + userSession.RemoveClientSession(existingClientSession.Id); + } + + var newClientSession = userSession.CreateNewClientSession( + ipAddress, base64ClientModulus, userClientGlobalId); + + await this.UpdateUserSessionAsync(userSession); + + return newClientSession; + } + + /// + /// Updates the UserSession in the cache, or removes expired ones. + /// + /// The user session. + public async ValueTask UpdateUserSessionAsync(UserSessionAsync userSession) + { + var hasSessionExpired = userSession.HasExpired(); + if (hasSessionExpired) + { + LogIfDebug("Session has expired, removing: " + userSession.ToCacheKey()); + await this.cacheClient.RemoveAsync(userSession.ToCacheKey()); + } + else + { + LogIfDebug("Updating session: " + userSession.ToCacheKey()); + await this.cacheClient.ReplaceAsync(userSession.ToCacheKey(), userSession, userSession.ExpiryDate.Value); + } + } + + /// + /// Gets the user session if it exists or null. + /// + /// The user global id. + /// + public async ValueTask GetUserSessionAsync(Guid userId) + { + var cacheKey = UserSession.ToCacheKey(userId); + var bytes = await this.cacheClient.GetAsync(cacheKey); + if (bytes != null) + { + var modelStr = Encoding.UTF8.GetString(bytes); + LogIfDebug("UserSession => " + modelStr); + } + return await this.cacheClient.GetAsync(cacheKey); + } + + /// + /// Gets or create a user session if one doesn't exist. + /// + /// The user global id. + /// Title of the user. + /// + /// + public async ValueTask GetOrCreateSessionAsync(Guid userId, string userName, string shardId) + { + var userSession = await this.GetUserSessionAsync(userId); + if (userSession == null) + { + userSession = new UserSessionAsync(userId, userName, shardId); + + await this.cacheClient.AddAsync(userSession.ToCacheKey(), userSession, + userSession.ExpiryDate.GetValueOrDefault(DateTime.UtcNow) + TimeSpan.FromHours(1)); + } + return userSession; + } + + /// + /// Gets the user client session identified by the id if exists otherwise null. + /// + /// The user global id. + /// The client session id. + /// + public async ValueTask GetUserClientSessionAsync(Guid userId, Guid clientSessionId) + { + var userSession = await this.GetUserSessionAsync(userId); + return userSession != null ? userSession.GetClientSession(clientSessionId) : null; + } + } + +#if !NETCORE + [Serializable /* was required when storing in memcached, not required in Redis */] +#endif + public class UserSessionAsync + { + //Empty constructor required for TypeSerializer + public UserSessionAsync() + { + this.PublicClientSessions = new Dictionary(); + } + + public Guid UserId { get; private set; } + + public string UserName { get; private set; } + + public string ShardId { get; private set; } + + public Dictionary PublicClientSessions { get; private set; } + + public UserSessionAsync(Guid userId, string userName, string shardId) + : this() + { + this.UserId = userId; + this.UserName = userName; + this.ShardId = shardId; + } + + /// + /// Gets the max expiry date of all the users client sessions. + /// If the user has no more active client sessions we can remove them from the cache. + /// + /// The expiry date. + public DateTime? 
ExpiryDate + { + get + { + DateTime? maxExpiryDate = null; + + foreach (var session in this.PublicClientSessions.Values) + { + if (maxExpiryDate == null || session.ExpiryDate > maxExpiryDate) + { + maxExpiryDate = session.ExpiryDate; + } + } + return maxExpiryDate; + } + } + + /// + /// Creates a new client session for the user. + /// + /// The ip address. + /// The base64 client modulus. + /// The user client global id. + /// + public UserClientSession CreateNewClientSession(string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + return this.CreateClientSession(Guid.NewGuid(), ipAddress, base64ClientModulus, userClientGlobalId); + } + + public UserClientSession CreateClientSession(Guid sessionId, string ipAddress, string base64ClientModulus, Guid userClientGlobalId) + { + var clientSession = new UserClientSession( + sessionId, this.UserId, ipAddress, base64ClientModulus, userClientGlobalId); + + this.PublicClientSessions[clientSession.Id] = clientSession; + + return clientSession; + } + + /// + /// Removes the client session. + /// + /// The client session id. + public void RemoveClientSession(Guid clientSessionId) + { + if (this.PublicClientSessions.ContainsKey(clientSessionId)) + { + this.PublicClientSessions.Remove(clientSessionId); + } + } + + public UserClientSession GetClientSessionWithClientId(Guid userClientId) + { + foreach (var entry in this.PublicClientSessions) + { + if (entry.Value.UserClientGlobalId == userClientId) + { + return entry.Value; + } + } + + return null; + } + + /// + /// Verifies this UserSession, removing any expired sessions. + /// Returns true to keep the UserSession in the cache. + /// + /// + /// true if this session has any active client sessions; otherwise, false. + /// + public bool HasExpired() + { + RemoveExpiredSessions(this.PublicClientSessions); + + //If there are no more active client sessions we can remove the entire UserSessions + var sessionHasExpired = + this.ExpiryDate == null //There are no UserClientSessions + || this.ExpiryDate.Value <= DateTime.UtcNow; //The max UserClientSession ExpiryDate has expired + + return sessionHasExpired; + } + + private static void RemoveExpiredSessions(IDictionary clientSessions) + { + var expiredSessionKeys = new List(); + + foreach (var clientSession in clientSessions) + { + if (clientSession.Value.ExpiryDate < DateTime.UtcNow) + { + expiredSessionKeys.Add(clientSession.Key); + } + } + + foreach (var sessionKey in expiredSessionKeys) + { + clientSessions.Remove(sessionKey); + } + } + + public void RemoveAllSessions() + { + this.PublicClientSessions.Clear(); + } + + public UserClientSession GetClientSession(Guid clientSessionId) + { + UserClientSession session; + + if (this.PublicClientSessions.TryGetValue(clientSessionId, out session)) + { + return session; + } + + return null; + } + + public string ToCacheKey() + { + return ToCacheKey(this.UserId); + } + + public static string ToCacheKey(Guid userId) + { + return UrnId.Create(userId.ToString()); + } + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.cs b/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.cs index 90e36402..10ba3109 100644 --- a/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.cs +++ b/tests/ServiceStack.Redis.Tests/UserSessionRedisClientTests.cs @@ -14,9 +14,7 @@ public class UserSessionTests { static UserSessionTests() { -#if !NETCORE LogManager.LogFactory = new ConsoleLogFactory(); -#endif } //MasterUser master; diff --git 
a/tests/ServiceStack.Redis.Tests/ValueTypeExamples.Async.cs b/tests/ServiceStack.Redis.Tests/ValueTypeExamples.Async.cs new file mode 100644 index 00000000..c4baa07f --- /dev/null +++ b/tests/ServiceStack.Redis.Tests/ValueTypeExamples.Async.cs @@ -0,0 +1,135 @@ +using NUnit.Framework; +using ServiceStack.Redis.Generic; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace ServiceStack.Redis.Tests +{ + [TestFixture, Category("Integration"), Category("Async")] + public class ValueTypeExamplesAsync + { + [SetUp] + public async Task SetUp() + { + await using var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + await redisClient.FlushAllAsync(); + } + + [Test] + public async Task Working_with_int_values() + { + const string intKey = "intkey"; + const int intValue = 1; + + //STORING AN INT USING THE BASIC CLIENT + await using (var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly()) + { + await redisClient.SetValueAsync(intKey, intValue.ToString()); + string strGetIntValue = await redisClient.GetValueAsync(intKey); + int toIntValue = int.Parse(strGetIntValue); + + Assert.That(toIntValue, Is.EqualTo(intValue)); + } + + //STORING AN INT USING THE GENERIC CLIENT + await using (var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly()) + { + //Create a generic client that treats all values as ints: + IRedisTypedClientAsync<int> intRedis = redisClient.As<int>(); + + await intRedis.SetValueAsync(intKey, intValue); + var toIntValue = await intRedis.GetValueAsync(intKey); + + Assert.That(toIntValue, Is.EqualTo(intValue)); + } + } + + [Test] + public async Task Working_with_int_list_values() + { + const string intListKey = "intListKey"; + var intValues = new List<int> { 2, 4, 6, 8 }; + + //STORING INTS INTO A LIST USING THE BASIC CLIENT + await using (var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly()) + { + IRedisListAsync strList = redisClient.Lists[intListKey]; + + //storing all int values in the redis list 'intListKey' as strings + await intValues.ForEachAsync(async x => await strList.AddAsync(x.ToString())); + + //retrieve all values again as strings + List<string> strListValues = await strList.ToListAsync(); + + //convert back to list of ints + List<int> toIntValues = strListValues.ConvertAll(x => int.Parse(x)); + + Assert.That(toIntValues, Is.EqualTo(intValues)); + + //delete all items in the list + await strList.ClearAsync(); + } + + //STORING INTS INTO A LIST USING THE GENERIC CLIENT + await using (var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly()) + { + //Create a generic client that treats all values as ints: + IRedisTypedClientAsync<int> intRedis = redisClient.As<int>(); + + IRedisListAsync<int> intList = intRedis.Lists[intListKey]; + + //storing all int values in the redis list 'intListKey' as ints + await intValues.ForEachAsync(async x => await intList.AddAsync(x)); + + List<int> toIntListValues = await intList.ToListAsync(); + + Assert.That(toIntListValues, Is.EqualTo(intValues)); + } + } + + public class IntAndString + { + public int Id { get; set; } + public string Letter { get; set; } + } + + [Test] + public async Task Working_with_Generic_types() + { + await using var redisClient = new RedisClient(TestConfig.SingleHost).ForAsyncOnly(); + //Create a typed Redis client that treats all values as IntAndString: + var typedRedis = redisClient.As<IntAndString>(); + + var pocoValue = new IntAndString { Id = 1, Letter = "A" }; + await typedRedis.SetValueAsync("pocoKey", pocoValue); + IntAndString toPocoValue = await 
typedRedis.GetValueAsync("pocoKey"); + + Assert.That(toPocoValue.Id, Is.EqualTo(pocoValue.Id)); + Assert.That(toPocoValue.Letter, Is.EqualTo(pocoValue.Letter)); + + var pocoListValues = new List<IntAndString> { + new IntAndString {Id = 2, Letter = "B"}, + new IntAndString {Id = 3, Letter = "C"}, + new IntAndString {Id = 4, Letter = "D"}, + new IntAndString {Id = 5, Letter = "E"}, + }; + + IRedisListAsync<IntAndString> pocoList = typedRedis.Lists["pocoListKey"]; + + //Adding all IntAndString objects into the redis list 'pocoListKey' + await pocoListValues.ForEachAsync(async x => await pocoList.AddAsync(x)); + + List<IntAndString> toPocoListValues = await pocoList.ToListAsync(); + + for (var i = 0; i < pocoListValues.Count; i++) + { + pocoValue = pocoListValues[i]; + toPocoValue = toPocoListValues[i]; + Assert.That(toPocoValue.Id, Is.EqualTo(pocoValue.Id)); + Assert.That(toPocoValue.Letter, Is.EqualTo(pocoValue.Letter)); + } + } + + } +} \ No newline at end of file diff --git a/tests/ServiceStack.Redis.Tests/ValueTypeExamples.cs b/tests/ServiceStack.Redis.Tests/ValueTypeExamples.cs index efebf37b..f0588d62 100644 --- a/tests/ServiceStack.Redis.Tests/ValueTypeExamples.cs +++ b/tests/ServiceStack.Redis.Tests/ValueTypeExamples.cs @@ -26,7 +26,7 @@ public void Working_with_int_values() //STORING AN INT USING THE BASIC CLIENT using (var redisClient = new RedisClient(TestConfig.SingleHost)) { - redisClient.SetEntry(intKey, intValue.ToString()); + redisClient.SetValue(intKey, intValue.ToString()); string strGetIntValue = redisClient.GetValue(intKey); int toIntValue = int.Parse(strGetIntValue); @@ -39,7 +39,7 @@ public void Working_with_int_values() //Create a generic client that treats all values as ints: IRedisTypedClient<int> intRedis = redisClient.As<int>(); - intRedis.SetEntry(intKey, intValue); + intRedis.SetValue(intKey, intValue); var toIntValue = intRedis.GetValue(intKey); Assert.That(toIntValue, Is.EqualTo(intValue)); @@ -104,7 +104,7 @@ public void Working_with_Generic_types() var typedRedis = redisClient.As<IntAndString>(); var pocoValue = new IntAndString { Id = 1, Letter = "A" }; - typedRedis.SetEntry("pocoKey", pocoValue); + typedRedis.SetValue("pocoKey", pocoValue); IntAndString toPocoValue = typedRedis.GetValue("pocoKey"); Assert.That(toPocoValue.Id, Is.EqualTo(pocoValue.Id)); diff --git a/tests/ServiceStack.Redis.Tests/packages.config b/tests/ServiceStack.Redis.Tests/packages.config deleted file mode 100644 index 40226391..00000000 --- a/tests/ServiceStack.Redis.Tests/packages.config +++ /dev/null @@ -1,5 +0,0 @@ - - - - - \ No newline at end of file