extends RespType<
+ RESP_TYPES['MAP'],
+ {
+ [P in T[number] as ExtractMapKey<P[0]>]: P[1];
+ },
+ Map<ExtractMapKey<T[number][0]>, T[number][1]> | FlattenTuples<T>
+> {}
+
+type FlattenTuples<T> = (
+ T extends [] ? [] :
+ T extends [MapKeyValue] ? T[0] :
+ T extends [MapKeyValue, ...infer R] ? [
+ ...T[0],
+ ...FlattenTuples<R>
+ ] :
+ never
+);
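+
+// Illustrative note (not part of this change): given map entries [[K1, V1], [K2, V2]] whose
+// elements match MapKeyValue, FlattenTuples resolves to the flat [K1, V1, K2, V2] tuple, i.e.
+// the RESP2-style [key, value, key, value, ...] layout.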
+
+export type ReplyUnion = (
+ NullReply |
+ BooleanReply |
+ NumberReply |
+ BigNumberReply |
+ DoubleReply |
+ SimpleStringReply |
+ BlobStringReply |
+ VerbatimStringReply |
+ SimpleErrorReply |
+ BlobErrorReply |
+ ArrayReply |
+ SetReply |
+ MapReply
+);
+
+export type MappedType<T = any> = ((...args: any) => T) | (new (...args: any) => T);
+
+type InferTypeMapping<T> = T extends RespType<RespTypes, any, any, infer FLAG_TYPES> ? FLAG_TYPES : never;
+
+export type TypeMapping = {
+ [P in RespTypes]?: MappedType<InferTypeMapping<Extract<ReplyUnion, RespType<P, any, any, any>>>>;
+};
+
+type MapKey<
+ T,
+ TYPE_MAPPING extends TypeMapping
+> = ReplyWithTypeMapping<T, TYPE_MAPPING>;
+
+export type UnwrapReply<REPLY extends RespType<any, any, any>> = REPLY['DEFAULT' | 'TYPES'];
+
+export type ReplyWithTypeMapping<
+ REPLY,
+ TYPE_MAPPING extends TypeMapping
+> = (
+ // if REPLY is a type, extract the corresponding type from TYPE_MAPPING or use the default type
+ REPLY extends RespType<infer RESP_TYPE, infer DEFAULT, infer TYPES, unknown> ?
+ TYPE_MAPPING[RESP_TYPE] extends MappedType<infer T> ?
+ ReplyWithTypeMapping<Extract<DEFAULT | TYPES, T>, TYPE_MAPPING> :
+ ReplyWithTypeMapping<DEFAULT, TYPE_MAPPING>
+ : (
+ // if REPLY is a known generic type, convert its generic arguments
+ // TODO: tuples?
+ REPLY extends Array<infer T> ? Array<ReplyWithTypeMapping<T, TYPE_MAPPING>> :
+ REPLY extends Set<infer T> ? Set<ReplyWithTypeMapping<T, TYPE_MAPPING>> :
+ REPLY extends Map<infer K, infer V> ? Map<ReplyWithTypeMapping<K, TYPE_MAPPING>, ReplyWithTypeMapping<V, TYPE_MAPPING>> :
+ // `Date | Buffer | Error` are supersets of `Record`, so they need to be checked first
+ REPLY extends Date | Buffer | Error ? REPLY :
+ REPLY extends Record<PropertyKey, any> ? {
+ [P in keyof REPLY]: ReplyWithTypeMapping<REPLY[P], TYPE_MAPPING>;
+ } :
+ // otherwise, just return the REPLY as is
+ REPLY
+ )
+);
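+
+// Illustrative note (not part of this change): at runtime a TypeMapping value such as
+//   { [RESP_TYPES.BLOB_STRING]: Buffer }
+// asks the client to return blob strings as Buffer; ReplyWithTypeMapping mirrors that choice
+// at the type level by replacing the default reply type with the mapped constructor's
+// instance type.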
+
+export type TransformReply = (this: void, reply: any, preserve?: any, typeMapping?: TypeMapping) => any; // TODO;
+
+export type RedisArgument = string | Buffer;
+
+export type CommandArguments = Array<RedisArgument> & { preserve?: unknown };
+
+// export const REQUEST_POLICIES = {
+// /**
+// * TODO
+// */
+// ALL_NODES: 'all_nodes',
+// /**
+// * TODO
+// */
+// ALL_SHARDS: 'all_shards',
+// /**
+// * TODO
+// */
+// SPECIAL: 'special'
+// } as const;
+
+// export type REQUEST_POLICIES = typeof REQUEST_POLICIES;
+
+// export type RequestPolicies = REQUEST_POLICIES[keyof REQUEST_POLICIES];
+
+// export const RESPONSE_POLICIES = {
+// /**
+// * TODO
+// */
+// ONE_SUCCEEDED: 'one_succeeded',
+// /**
+// * TODO
+// */
+// ALL_SUCCEEDED: 'all_succeeded',
+// /**
+// * TODO
+// */
+// LOGICAL_AND: 'agg_logical_and',
+// /**
+// * TODO
+// */
+// SPECIAL: 'special'
+// } as const;
+
+// export type RESPONSE_POLICIES = typeof RESPONSE_POLICIES;
+
+// export type ResponsePolicies = RESPONSE_POLICIES[keyof RESPONSE_POLICIES];
+
+// export type CommandPolicies = {
+// request?: RequestPolicies | null;
+// response?: ResponsePolicies | null;
+// };
+
+export type Command = {
+ CACHEABLE?: boolean;
+ IS_READ_ONLY?: boolean;
+ /**
+ * @internal
+ * TODO: remove once `POLICIES` is implemented
+ */
+ IS_FORWARD_COMMAND?: boolean;
+ NOT_KEYED_COMMAND?: true;
+ // POLICIES?: CommandPolicies;
+ parseCommand(this: void, parser: CommandParser, ...args: Array<any>): void;
+ TRANSFORM_LEGACY_REPLY?: boolean;
+ transformReply: TransformReply | Record<RespVersions, TransformReply>;
+ unstableResp3?: boolean;
+};
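+
+// Illustrative sketch (not part of this change) of the shape a command definition takes;
+// `parser.push` is assumed here to append raw arguments, as the command files elsewhere in
+// the codebase do:
+//
+//   const PING = {
+//     IS_READ_ONLY: true,
+//     parseCommand(parser: CommandParser) {
+//       parser.push('PING');
+//     },
+//     transformReply: undefined as unknown as () => SimpleStringReply
+//   } as const satisfies Command;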
+
+export type RedisCommands = Record<string, Command>;
+
+export type RedisModules = Record<string, RedisCommands>;
+
+export interface RedisFunction extends Command {
+ NUMBER_OF_KEYS?: number;
+}
+
+export type RedisFunctions = Record<string, Record<string, RedisFunction>>;
+
+export type RedisScript = RedisScriptConfig & SHA1;
+
+export type RedisScripts = Record<string, RedisScript>;
+
+// TODO: move to Commander?
+export interface CommanderConfig<
+ M extends RedisModules,
+ F extends RedisFunctions,
+ S extends RedisScripts,
+ RESP extends RespVersions
+> {
+ modules?: M;
+ functions?: F;
+ scripts?: S;
+ /**
+ * Specifies the Redis Serialization Protocol version to use.
+ * RESP2 is the default (value 2), while RESP3 (value 3) provides
+ * additional data types and features introduced in Redis 6.0.
+ */
+ RESP?: RESP;
+ /**
+ * When set to true, enables commands that have unstable RESP3 implementations.
+ * When using RESP3 protocol, commands marked as having unstable RESP3 support
+ * will throw an error unless this flag is explicitly set to true.
+ * This primarily affects modules like Redis Search where response formats
+ * in RESP3 mode may change in future versions.
+ */
+ unstableResp3?: boolean;
+}
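+
+// Illustrative sketch (not part of this change): a minimal CommanderConfig that opts into
+// RESP3 without registering modules, functions, or scripts.
+//
+//   const config: CommanderConfig<{}, {}, {}, 3> = {
+//     RESP: 3,
+//     unstableResp3: false
+//   };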
+
+type Resp2Array<T> = (
+ T extends [] ? [] :
+ T extends [infer ITEM] ? [Resp2Reply<ITEM>] :
+ T extends [infer ITEM, ...infer REST] ? [
+ Resp2Reply<ITEM>,
+ ...Resp2Array<REST>
+ ] :
+ T extends Array<infer ITEM> ? Array<Resp2Reply<ITEM>> :
+ never
+);
+
+export type Resp2Reply<RESP3REPLY> = (
+ RESP3REPLY extends RespType<infer RESP_TYPE, infer DEFAULT, infer TYPES, unknown> ?
+ // TODO: RESP3 only scalar types
+ RESP_TYPE extends RESP_TYPES['DOUBLE'] ? BlobStringReply :
+ RESP_TYPE extends RESP_TYPES['ARRAY'] | RESP_TYPES['SET'] ? RespType<
+ RESP_TYPE,
+ Resp2Array<DEFAULT>
+ > :
+ RESP_TYPE extends RESP_TYPES['MAP'] ? RespType<
+ RESP_TYPES['ARRAY'],
+ Resp2Array<FlattenTuples<DEFAULT>>
+ > :
+ RESP3REPLY :
+ RESP3REPLY
+);
+
+export type RespVersions = 2 | 3;
+
+export type CommandReply<
+ COMMAND extends Command,
+ RESP extends RespVersions
+> = (
+ // if transformReply is a function, use its return type
+ COMMAND['transformReply'] extends (...args: any) => infer T ? T :
+ // if transformReply[RESP] is a function, use its return type
+ COMMAND['transformReply'] extends Record<RESP, (...args: any) => infer T> ? T :
+ // otherwise use the generic reply type
+ ReplyUnion
+);
+
+export type CommandSignature<
+ COMMAND extends Command,
+ RESP extends RespVersions,
+ TYPE_MAPPING extends TypeMapping
+> = (...args: Tail<Parameters<COMMAND['parseCommand']>>) => Promise<ReplyWithTypeMapping<CommandReply<COMMAND, RESP>, TYPE_MAPPING>>;
+
+// export type CommandWithPoliciesSignature<
+// COMMAND extends Command,
+// RESP extends RespVersions,
+// TYPE_MAPPING extends TypeMapping,
+// POLICIES extends CommandPolicies
+// > = (...args: Parameters) => Promise<
+// ReplyWithPolicy<
+// ReplyWithTypeMapping, TYPE_MAPPING>,
+// MergePolicies
+// >
+// >;
+
+// export type MergePolicies<
+// COMMAND extends Command,
+// POLICIES extends CommandPolicies
+// > = Omit & POLICIES;
+
+// type ReplyWithPolicy<
+// REPLY,
+// POLICIES extends CommandPolicies,
+// > = (
+// POLICIES['request'] extends REQUEST_POLICIES['SPECIAL'] ? never :
+// POLICIES['request'] extends null | undefined ? REPLY :
+// unknown extends POLICIES['request'] ? REPLY :
+// POLICIES['response'] extends RESPONSE_POLICIES['SPECIAL'] ? never :
+// POLICIES['response'] extends RESPONSE_POLICIES['ALL_SUCCEEDED' | 'ONE_SUCCEEDED' | 'LOGICAL_AND'] ? REPLY :
+// // otherwise, return array of replies
+// Array
+// );
diff --git a/packages/client/lib/RESP/verbatim-string.ts b/packages/client/lib/RESP/verbatim-string.ts
new file mode 100644
index 00000000000..92ff4fe3fb1
--- /dev/null
+++ b/packages/client/lib/RESP/verbatim-string.ts
@@ -0,0 +1,8 @@
+export class VerbatimString extends String {
+ constructor(
+ public format: string,
+ value: string
+ ) {
+ super(value);
+ }
+}
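+
+// Illustrative usage (not part of this change): RESP3 verbatim strings carry a three-character
+// format prefix (e.g. 'txt' or 'mkd') alongside the string value.
+//
+//   const s = new VerbatimString('txt', 'hello');
+//   s.format;     // 'txt'
+//   s.toString(); // 'hello'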
diff --git a/packages/client/lib/authx/credentials-provider.ts b/packages/client/lib/authx/credentials-provider.ts
new file mode 100644
index 00000000000..667795be9b3
--- /dev/null
+++ b/packages/client/lib/authx/credentials-provider.ts
@@ -0,0 +1,102 @@
+import { Disposable } from './disposable';
+/**
+ * Provides credentials asynchronously.
+ */
+export interface AsyncCredentialsProvider {
+ readonly type: 'async-credentials-provider';
+ credentials: () => Promise<BasicAuth>
+}
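+
+// Illustrative sketch (not part of this change): a provider that resolves static credentials;
+// a real implementation would typically fetch them from a secret store or token service.
+//
+//   const staticProvider: AsyncCredentialsProvider = {
+//     type: 'async-credentials-provider',
+//     credentials: async () => ({ username: 'app-user', password: 'app-secret' })
+//   };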
+
+/**
+ * Provides credentials asynchronously with support for continuous updates via a subscription model.
+ * This is useful for environments where credentials are frequently rotated, updated, or revoked.
+ */
+export interface StreamingCredentialsProvider {
+ readonly type: 'streaming-credentials-provider';
+
+ /**
+ * Provides initial credentials and subscribes to subsequent updates. This is used internally by the node-redis client
+ * to handle credential rotation and re-authentication.
+ *
+ * Note: The node-redis client manages the subscription lifecycle automatically. Users only need to implement
+ * onReAuthenticationError if they want to be notified about authentication failures.
+ *
+ * Error handling:
+ * - Errors received via onError indicate a fatal issue with the credentials stream
+ * - The stream is automatically closed (disposed) when onError occurs
+ * - onError typically means the provider failed to fetch new credentials after retrying
+ *
+ * @example
+ * ```ts
+ * const provider = getStreamingProvider();
+ * const [initialCredentials, disposable] = await provider.subscribe({
+ * onNext: (newCredentials) => {
+ * // Handle credential update
+ * },
+ * onError: (error) => {
+ * // Handle fatal stream error
+ * }
+ * });
+ * ```
+ *
+ * @param listener - Callbacks to handle credential updates and errors
+ * @returns A Promise resolving to [initial credentials, cleanup function]
+ */
+ subscribe: (listener: StreamingCredentialsListener<BasicAuth>) => Promise<[BasicAuth, Disposable]>
+
+ /**
+ * Called when authentication fails or credentials cannot be renewed in time.
+ * Implement this to handle authentication errors in your application.
+ *
+ * @param error - Either a CredentialsError (invalid/expired credentials) or
+ * UnableToObtainNewCredentialsError (failed to fetch new credentials on time)
+ */
+ onReAuthenticationError: (error: ReAuthenticationError) => void;
+
+}
+
+/**
+ * Type representing basic authentication credentials.
+ */
+export type BasicAuth = { username?: string, password?: string }
+
+/**
+ * Callback to handle credential updates and errors.
+ */
+export type StreamingCredentialsListener<T> = {
+ onNext: (credentials: T) => void;
+ onError: (e: Error) => void;
+}
+
+
+/**
+ * Providers that can supply authentication credentials
+ */
+export type CredentialsProvider = AsyncCredentialsProvider | StreamingCredentialsProvider
+
+/**
+ * Errors that can occur during re-authentication.
+ */
+export type ReAuthenticationError = CredentialsError | UnableToObtainNewCredentialsError
+
+/**
+ * Thrown when re-authentication with the provided credentials fails,
+ * e.g. when the credentials are invalid, expired, or revoked.
+ *
+ */
+export class CredentialsError extends Error {
+ constructor(message: string) {
+ super(`Re-authentication with latest credentials failed: ${message}`);
+ this.name = 'CredentialsError';
+ }
+
+}
+
+/**
+ * Thrown when new credentials cannot be obtained before current ones expire
+ */
+export class UnableToObtainNewCredentialsError extends Error {
+ constructor(message: string) {
+ super(`Unable to obtain new credentials: ${message}`);
+ this.name = 'UnableToObtainNewCredentialsError';
+ }
+}
\ No newline at end of file
diff --git a/packages/client/lib/authx/disposable.ts b/packages/client/lib/authx/disposable.ts
new file mode 100644
index 00000000000..ee4526a37bd
--- /dev/null
+++ b/packages/client/lib/authx/disposable.ts
@@ -0,0 +1,6 @@
+/**
+ * Represents a resource that can be disposed.
+ */
+export interface Disposable {
+ dispose(): void;
+}
\ No newline at end of file
diff --git a/packages/client/lib/authx/identity-provider.ts b/packages/client/lib/authx/identity-provider.ts
new file mode 100644
index 00000000000..a2d25c8f9db
--- /dev/null
+++ b/packages/client/lib/authx/identity-provider.ts
@@ -0,0 +1,22 @@
+/**
+ * An identity provider is responsible for providing a token that can be used to authenticate with a service.
+ */
+
+/**
+ * The response from an identity provider when requesting a token.
+ *
+ * note: "native" refers to the type of the token that the actual identity provider library is using.
+ *
+ * @type T The type of the native idp token.
+ * @property token The token.
+ * @property ttlMs The time-to-live of the token in milliseconds, derived from the native token.
+ */
+export type TokenResponse<T> = { token: T, ttlMs: number };
+
+export interface IdentityProvider<T> {
+ /**
+ * Request a token from the identity provider.
+ * @returns A promise that resolves to an object containing the token and its time-to-live in milliseconds.
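+ *
+ * @example
+ * ```ts
+ * // Illustrative sketch (not part of this change); `fetchAccessToken` is a hypothetical helper.
+ * class MyIdentityProvider implements IdentityProvider<string> {
+ *   async requestToken(): Promise<TokenResponse<string>> {
+ *     const { accessToken, expiresInMs } = await fetchAccessToken();
+ *     return { token: accessToken, ttlMs: expiresInMs };
+ *   }
+ * }
+ * ```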
+ */
+ requestToken(): Promise<TokenResponse<T>>;
+}
\ No newline at end of file
diff --git a/packages/client/lib/authx/index.ts b/packages/client/lib/authx/index.ts
new file mode 100644
index 00000000000..ce611e1497f
--- /dev/null
+++ b/packages/client/lib/authx/index.ts
@@ -0,0 +1,15 @@
+export { TokenManager, TokenManagerConfig, TokenStreamListener, RetryPolicy, IDPError } from './token-manager';
+export {
+ CredentialsProvider,
+ StreamingCredentialsProvider,
+ UnableToObtainNewCredentialsError,
+ CredentialsError,
+ StreamingCredentialsListener,
+ AsyncCredentialsProvider,
+ ReAuthenticationError,
+ BasicAuth
+} from './credentials-provider';
+export { Token } from './token';
+export { IdentityProvider, TokenResponse } from './identity-provider';
+
+export { Disposable } from './disposable'
\ No newline at end of file
diff --git a/packages/client/lib/authx/token-manager.spec.ts b/packages/client/lib/authx/token-manager.spec.ts
new file mode 100644
index 00000000000..1cc2a207edc
--- /dev/null
+++ b/packages/client/lib/authx/token-manager.spec.ts
@@ -0,0 +1,588 @@
+import { strict as assert } from 'node:assert';
+import { Token } from './token';
+import { IDPError, RetryPolicy, TokenManager, TokenManagerConfig, TokenStreamListener } from './token-manager';
+import { IdentityProvider, TokenResponse } from './identity-provider';
+import { setTimeout } from 'timers/promises';
+
+describe('TokenManager', () => {
+
+ /**
+ * Helper function to delay execution for a given number of milliseconds.
+ * @param ms
+ */
+ const delay = (ms: number) => {
+ return setTimeout(ms);
+ }
+
+ /**
+ * IdentityProvider that returns a fixed test token for testing and doesn't handle TTL.
+ */
+ class TestIdentityProvider implements IdentityProvider<string> {
+ requestToken(): Promise<TokenResponse<string>> {
+ return Promise.resolve({ token: 'test-token 1', ttlMs: 1000 });
+ }
+ }
+
+ /**
+ * Helper function to create a test token with a given TTL.
+ * @param ttlMs Time-to-live in milliseconds
+ */
+ const createToken = (ttlMs: number): Token<string> => {
+ return new Token('test-token', ttlMs, 0);
+ };
+
+ /**
+ * Listener that records received tokens and errors for testing.
+ */
+ class TestListener implements TokenStreamListener<string> {
+
+ public readonly receivedTokens: Token<string>[] = [];
+ public readonly errors: IDPError[] = [];
+
+ onNext(token: Token<string>): void {
+ this.receivedTokens.push(token);
+ }
+
+ onError(error: IDPError): void {
+ this.errors.push(error);
+ }
+ }
+
+ /**
+ * IdentityProvider that returns a sequence of tokens with a fixed delay simulating network latency.
+ * Used for testing token refresh scenarios.
+ */
+ class ControlledIdentityProvider implements IdentityProvider<string> {
+ private tokenIndex = 0;
+ private readonly delayMs: number;
+ private readonly ttlMs: number;
+
+ constructor(
+ private readonly tokens: string[],
+ delayMs: number = 0,
+ tokenTTlMs: number = 100
+ ) {
+ this.delayMs = delayMs;
+ this.ttlMs = tokenTTlMs;
+ }
+
+ async requestToken(): Promise<TokenResponse<string>> {
+
+ if (this.tokenIndex >= this.tokens.length) {
+ throw new Error('No more test tokens available');
+ }
+
+ if (this.delayMs > 0) {
+ await setTimeout(this.delayMs);
+ }
+
+ return { token: this.tokens[this.tokenIndex++], ttlMs: this.ttlMs };
+ }
+
+ }
+
+ /**
+ * IdentityProvider that simulates various error scenarios with configurable behavior
+ */
+ class ErrorSimulatingProvider implements IdentityProvider<string> {
+ private requestCount = 0;
+
+ constructor(
+ private readonly errorSequence: Array<Error | string>,
+ private readonly delayMs: number = 0,
+ private readonly ttlMs: number = 100
+ ) {}
+
+ async requestToken(): Promise<TokenResponse<string>> {
+
+ if (this.delayMs > 0) {
+ await delay(this.delayMs);
+ }
+
+ const result = this.errorSequence[this.requestCount];
+ this.requestCount++;
+
+ if (result instanceof Error) {
+ throw result;
+ } else if (typeof result === 'string') {
+ return { token: result, ttlMs: this.ttlMs };
+ } else {
+ throw new Error('No more responses configured');
+ }
+ }
+
+ getRequestCount(): number {
+ return this.requestCount;
+ }
+ }
+
+ describe('constructor validation', () => {
+ it('should throw error if ratio is greater than 1', () => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 1.1
+ };
+
+ assert.throws(
+ () => new TokenManager(new TestIdentityProvider(), config),
+ /expirationRefreshRatio must be less than or equal to 1/
+ );
+ });
+
+ it('should throw error if ratio is negative', () => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: -0.1
+ };
+
+ assert.throws(
+ () => new TokenManager(new TestIdentityProvider(), config),
+ /expirationRefreshRatio must be greater or equal to 0/
+ );
+ });
+
+ it('should accept ratio of 1', () => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 1
+ };
+
+ assert.doesNotThrow(
+ () => new TokenManager(new TestIdentityProvider(), config)
+ );
+ });
+
+ it('should accept ratio of 0', () => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0
+ };
+
+ assert.doesNotThrow(
+ () => new TokenManager(new TestIdentityProvider(), config)
+ );
+ });
+ });
+
+ describe('calculateRefreshTime', () => {
+ it('should calculate correct refresh time with 0.8 ratio', () => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0.8
+ };
+
+ const manager = new TokenManager(new TestIdentityProvider(), config);
+ const token = createToken(1000);
+ const refreshTime = manager.calculateRefreshTime(token, 0);
+
+ // With a 1000 ms TTL and 0.8 ratio, should refresh at 800 ms
+ assert.equal(refreshTime, 800);
+ });
+
+ it('should return 0 for ratio of 0', () => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0
+ };
+
+ const manager = new TokenManager(new TestIdentityProvider(), config);
+ const token = createToken(1000);
+ const refreshTime = manager.calculateRefreshTime(token, 0);
+
+ assert.equal(refreshTime, 0);
+ });
+
+ it('should refresh at expiration time with ratio of 1', () => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 1
+ };
+
+ const manager = new TokenManager(new TestIdentityProvider(), config);
+ const token = createToken(1000);
+ const refreshTime = manager.calculateRefreshTime(token, 0);
+
+ assert.equal(refreshTime, 1000);
+ });
+
+ it('should handle short TTL tokens', () => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0.8
+ };
+
+ const manager = new TokenManager(new TestIdentityProvider(), config);
+ const token = createToken(5);
+ const refreshTime = manager.calculateRefreshTime(token, 0);
+
+ assert.equal(refreshTime, 4);
+ });
+
+ it('should handle expired tokens', () => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0.8
+ };
+
+ const manager = new TokenManager(new TestIdentityProvider(), config);
+ // Create a token that expired 100 ms ago
+ const token = createToken(-100);
+ const refreshTime = manager.calculateRefreshTime(token, 0);
+
+ // Should return refresh time of 0 for expired tokens
+ assert.equal(refreshTime, 0);
+ });
+ describe('token refresh scenarios', () => {
+
+ describe('token refresh', () => {
+ it('should handle token refresh', async () => {
+ const networkDelay = 20;
+ const tokenTtl = 100;
+
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0.8
+ };
+
+ const identityProvider = new ControlledIdentityProvider(['token1', 'token2', 'token3'], networkDelay, tokenTtl);
+ const manager = new TokenManager(identityProvider, config);
+ const listener = new TestListener();
+ const disposable = manager.start(listener);
+
+ assert.equal(manager.getCurrentToken(), null, 'Should not have token yet');
+ // Wait for the first token request to complete (it is issued immediately, so we only wait for the network delay)
+ await delay(networkDelay)
+
+ assert.equal(listener.receivedTokens.length, 1, 'Should receive initial token');
+ assert.equal(listener.receivedTokens[0].value, 'token1', 'Should have correct token value');
+ assert.equal(listener.receivedTokens[0].expiresAtMs - listener.receivedTokens[0].receivedAtMs,
+ tokenTtl, 'Should have correct TTL');
+ assert.equal(listener.errors.length, 0, 'Should not have any errors: ' + listener.errors);
+ assert.equal(manager.getCurrentToken().value, 'token1', 'Should have current token');
+
+ await delay(80);
+
+ assert.equal(listener.receivedTokens.length, 1, 'Should not receive new token yet');
+ assert.equal(listener.errors.length, 0, 'Should not have any errors');
+
+ await delay(networkDelay);
+
+ assert.equal(listener.receivedTokens.length, 2, 'Should receive second token');
+ assert.equal(listener.receivedTokens[1].value, 'token2', 'Should have correct token value');
+ assert.equal(listener.receivedTokens[1].expiresAtMs - listener.receivedTokens[1].receivedAtMs,
+ tokenTtl, 'Should have correct TTL');
+ assert.equal(listener.errors.length, 0, 'Should not have any errors');
+ assert.equal(manager.getCurrentToken().value, 'token2', 'Should have current token');
+
+ await delay(80);
+
+ assert.equal(listener.receivedTokens.length, 2, 'Should not receive new token yet');
+ assert.equal(listener.errors.length, 0, 'Should not have any errors');
+
+ await delay(networkDelay);
+
+ assert.equal(listener.receivedTokens.length, 3, 'Should receive third token');
+ assert.equal(listener.receivedTokens[2].value, 'token3', 'Should have correct token value');
+ assert.equal(listener.receivedTokens[2].expiresAtMs - listener.receivedTokens[2].receivedAtMs,
+ tokenTtl, 'Should have correct TTL');
+ assert.equal(listener.errors.length, 0, 'Should not have any errors');
+ assert.equal(manager.getCurrentToken().value, 'token3', 'Should have current token');
+
+ disposable?.dispose();
+ });
+ });
+ });
+ });
+
+ describe('TokenManager error handling', () => {
+
+ describe('error scenarios', () => {
+ it('should not recover if retries are not enabled', async () => {
+
+ const networkDelay = 20;
+ const tokenTtl = 100;
+
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0.8
+ };
+
+ const identityProvider = new ErrorSimulatingProvider(
+ [
+ 'token1',
+ new Error('Fatal error'),
+ 'token3'
+ ],
+ networkDelay,
+ tokenTtl
+ );
+
+ const manager = new TokenManager(identityProvider, config);
+ const listener = new TestListener();
+ const disposable = manager.start(listener);
+
+ await delay(networkDelay);
+
+ assert.equal(listener.receivedTokens.length, 1, 'Should receive initial token');
+ assert.equal(listener.receivedTokens[0].value, 'token1', 'Should have correct initial token');
+ assert.equal(listener.receivedTokens[0].expiresAtMs - listener.receivedTokens[0].receivedAtMs,
+ tokenTtl, 'Should have correct TTL');
+ assert.equal(listener.errors.length, 0, 'Should not have errors yet');
+
+ await delay(80);
+
+ assert.equal(listener.receivedTokens.length, 1, 'Should not receive new token yet');
+ assert.equal(listener.errors.length, 0, 'Should not have any errors');
+
+ await delay(networkDelay);
+
+ assert.equal(listener.receivedTokens.length, 1, 'Should not receive new token after failure');
+ assert.equal(listener.errors.length, 1, 'Should receive error');
+ assert.equal(listener.errors[0].message, 'Fatal error', 'Should have correct error message');
+ assert.equal(listener.errors[0].isRetryable, false, 'Should be a fatal error');
+
+ // verify that the token manager is stopped and no more requests are made after the error and expected refresh time
+ await delay(80);
+
+ assert.equal(identityProvider.getRequestCount(), 2, 'Should not make more requests after error');
+ assert.equal(listener.receivedTokens.length, 1, 'Should not receive new token after error');
+ assert.equal(listener.errors.length, 1, 'Should not receive more errors after error');
+ assert.equal(manager.isRunning(), false, 'Should stop token manager after error');
+
+ disposable?.dispose();
+ });
+
+ it('should handle retries with exponential backoff', async () => {
+ const networkDelay = 20;
+ const tokenTtl = 100;
+
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0.8,
+ retry: {
+ maxAttempts: 3,
+ initialDelayMs: 100,
+ maxDelayMs: 1000,
+ backoffMultiplier: 2,
+ isRetryable: (error: unknown) => error instanceof Error && error.message === 'Temporary failure'
+ }
+ };
+
+ const identityProvider = new ErrorSimulatingProvider(
+ [
+ 'initial-token',
+ new Error('Temporary failure'), // First attempt fails
+ new Error('Temporary failure'), // First retry fails
+ 'recovery-token' // Second retry succeeds
+ ],
+ networkDelay,
+ tokenTtl
+ );
+
+ const manager = new TokenManager(identityProvider, config);
+ const listener = new TestListener();
+ const disposable = manager.start(listener);
+
+ // Wait for initial token
+ await delay(networkDelay);
+ assert.equal(listener.receivedTokens.length, 1, 'Should receive initial token');
+ assert.equal(listener.receivedTokens[0].value, 'initial-token', 'Should have correct initial token');
+ assert.equal(listener.receivedTokens[0].expiresAtMs - listener.receivedTokens[0].receivedAtMs,
+ tokenTtl, 'Should have correct TTL');
+ assert.equal(listener.errors.length, 0, 'Should not have errors yet');
+
+ await delay(80);
+
+ assert.equal(listener.receivedTokens.length, 1, 'Should not receive new token yet');
+ assert.equal(listener.errors.length, 0, 'Should not have any errors');
+
+ await delay(networkDelay);
+
+ // Should have first error but not stop due to retry config
+ assert.equal(listener.errors.length, 1, 'Should have first error');
+ assert.ok(listener.errors[0].message.includes('attempt 1'), 'Error should indicate first attempt');
+ assert.equal(listener.errors[0].isRetryable, true, 'Should not be a fatal error');
+ assert.equal(manager.isRunning(), true, 'Should continue running during retries');
+
+ // Advance past first retry (delay: 100ms due to backoff)
+ await delay(100);
+
+ assert.equal(listener.errors.length, 1, 'Should not have the second error yet');
+
+ await delay(networkDelay);
+
+ assert.equal(listener.errors.length, 2, 'Should have second error');
+ assert.ok(listener.errors[1].message.includes('attempt 2'), 'Error should indicate second attempt');
+ assert.equal(listener.errors[0].isRetryable, true, 'Should not be a fatal error');
+ assert.equal(manager.isRunning(), true, 'Should continue running during retries');
+
+ // Advance past second retry (delay: 200ms due to backoff)
+ await delay(200);
+
+ assert.equal(listener.errors.length, 2, 'Should not have another error');
+ assert.equal(listener.receivedTokens.length, 1, 'Should not receive new token yet');
+
+ await delay(networkDelay);
+
+ // Should have recovered
+ assert.equal(listener.receivedTokens.length, 2, 'Should receive recovery token');
+ assert.equal(listener.receivedTokens[1].value, 'recovery-token', 'Should have correct recovery token');
+ assert.equal(listener.receivedTokens[1].expiresAtMs - listener.receivedTokens[1].receivedAtMs,
+ tokenTtl, 'Should have correct TTL');
+ assert.equal(manager.isRunning(), true, 'Should continue running after recovery');
+ assert.equal(identityProvider.getRequestCount(), 4, 'Should have made exactly 4 requests');
+
+ disposable?.dispose();
+ });
+
+ it('should stop after max retries exceeded', async () => {
+ const networkDelay = 20;
+ const tokenTtl = 100;
+
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0.8,
+ retry: {
+ maxAttempts: 2, // Only allow 2 retries
+ initialDelayMs: 100,
+ maxDelayMs: 1000,
+ backoffMultiplier: 2,
+ jitterPercentage: 0,
+ isRetryable: (error: unknown) => error instanceof Error && error.message === 'Temporary failure'
+ }
+ };
+
+ // All attempts must fail
+ const identityProvider = new ErrorSimulatingProvider(
+ [
+ 'initial-token',
+ new Error('Temporary failure'),
+ new Error('Temporary failure'),
+ new Error('Temporary failure')
+ ],
+ networkDelay,
+ tokenTtl
+ );
+
+ const manager = new TokenManager(identityProvider, config);
+ const listener = new TestListener();
+ const disposable = manager.start(listener);
+
+ // Wait for initial token
+ await delay(networkDelay);
+ assert.equal(listener.receivedTokens.length, 1, 'Should receive initial token');
+
+ await delay(80);
+
+ assert.equal(listener.receivedTokens.length, 1, 'Should not receive new token yet');
+ assert.equal(listener.errors.length, 0, 'Should not have any errors');
+
+ //wait for the "network call" to complete
+ await delay(networkDelay);
+
+ // First error
+ assert.equal(listener.errors.length, 1, 'Should have first error');
+ assert.equal(manager.isRunning(), true, 'Should continue running after first error');
+ assert.equal(listener.errors[0].isRetryable, true, 'Should not be a fatal error');
+
+ // Advance past first retry
+ await delay(100);
+
+ assert.equal(listener.errors.length, 1, 'Should not have second error yet');
+
+ //wait for the "network call" to complete
+ await delay(networkDelay);
+
+ // Second error
+ assert.equal(listener.errors.length, 2, 'Should have second error');
+ assert.equal(manager.isRunning(), true, 'Should continue running after second error');
+ assert.equal(listener.errors[1].isRetryable, true, 'Should not be a fatal error');
+
+ // Advance past second retry
+ await delay(200);
+
+ assert.equal(listener.errors.length, 2, 'Should not have third error yet');
+
+ //wait for the "network call" to complete
+ await delay(networkDelay);
+
+ // Should stop after max retries
+ assert.equal(listener.errors.length, 3, 'Should have final error');
+ assert.equal(listener.errors[2].isRetryable, false, 'Should be a fatal error');
+ assert.equal(manager.isRunning(), false, 'Should stop after max retries exceeded');
+ assert.equal(identityProvider.getRequestCount(), 4, 'Should have made exactly 4 requests');
+
+ disposable?.dispose();
+
+ });
+ });
+ });
+
+ describe('TokenManager retry delay calculations', () => {
+ const createManager = (retryConfig: Partial<RetryPolicy>) => {
+ const config: TokenManagerConfig = {
+ expirationRefreshRatio: 0.8,
+ retry: {
+ maxAttempts: 3,
+ initialDelayMs: 100,
+ maxDelayMs: 1000,
+ backoffMultiplier: 2,
+ ...retryConfig
+ }
+ };
+ return new TokenManager(new TestIdentityProvider(), config);
+ };
+
+ describe('calculateRetryDelay', () => {
+
+ it('should apply exponential backoff', () => {
+ const manager = createManager({
+ initialDelayMs: 100,
+ backoffMultiplier: 2,
+ jitterPercentage: 0
+ });
+
+ // Test multiple retry attempts
+ const expectedDelays = [
+ [1, 100], // First attempt: initialDelay * (2^0) = 100
+ [2, 200], // Second attempt: initialDelay * (2^1) = 200
+ [3, 400], // Third attempt: initialDelay * (2^2) = 400
+ [4, 800], // Fourth attempt: initialDelay * (2^3) = 800
+ [5, 1000] // Fifth attempt: would be 1600, but capped at maxDelay (1000)
+ ];
+
+ for (const [attempt, expectedDelay] of expectedDelays) {
+ manager['retryAttempt'] = attempt;
+ assert.equal(
+ manager.calculateRetryDelay(),
+ expectedDelay,
+ `Incorrect delay for attempt ${attempt}`
+ );
+ }
+ });
+
+ it('should respect maxDelayMs', () => {
+ const manager = createManager({
+ initialDelayMs: 100,
+ maxDelayMs: 300,
+ backoffMultiplier: 2,
+ jitterPercentage: 0
+ });
+
+ // Test that delays are capped at maxDelayMs
+ const expectedDelays = [
+ [1, 100], // First attempt: 100
+ [2, 200], // Second attempt: 200
+ [3, 300], // Third attempt: would be 400, capped at 300
+ [4, 300], // Fourth attempt: would be 800, capped at 300
+ [5, 300] // Fifth attempt: would be 1600, capped at 300
+ ];
+
+ for (const [attempt, expectedDelay] of expectedDelays) {
+ manager['retryAttempt'] = attempt;
+ assert.equal(
+ manager.calculateRetryDelay(),
+ expectedDelay,
+ `Incorrect delay for attempt ${attempt}`
+ );
+ }
+ });
+
+ it('should return 0 when no retry config is present', () => {
+ const manager = new TokenManager(new TestIdentityProvider(), {
+ expirationRefreshRatio: 0.8
+ });
+ manager['retryAttempt'] = 1;
+ assert.equal(manager.calculateRetryDelay(), 0);
+ });
+ });
+ });
+});
+
diff --git a/packages/client/lib/authx/token-manager.ts b/packages/client/lib/authx/token-manager.ts
new file mode 100644
index 00000000000..6532d88317b
--- /dev/null
+++ b/packages/client/lib/authx/token-manager.ts
@@ -0,0 +1,318 @@
+import { IdentityProvider, TokenResponse } from './identity-provider';
+import { Token } from './token';
+import {Disposable} from './disposable';
+
+/**
+ * The configuration for retrying token refreshes.
+ */
+export interface RetryPolicy {
+ /**
+ * The maximum number of attempts to retry token refreshes.
+ */
+ maxAttempts: number;
+
+ /**
+ * The initial delay in milliseconds before the first retry.
+ */
+ initialDelayMs: number;
+
+ /**
+ * The maximum delay in milliseconds between retries.
+ * The calculated delay will be capped at this value.
+ */
+ maxDelayMs: number;
+
+ /**
+ * The multiplier for exponential backoff between retries.
+ * @example
+ * A value of 2 will double the delay each time:
+ * - 1st retry: initialDelayMs
+ * - 2nd retry: initialDelayMs * 2
+ * - 3rd retry: initialDelayMs * 4
+ */
+ backoffMultiplier: number;
+
+ /**
+ * The percentage of jitter to apply to the delay.
+ * @example
+ * A value of 0.1 will add or subtract up to 10% of the delay.
+ */
+ jitterPercentage?: number;
+
+ /**
+ * Function to classify errors from the identity provider as retryable or non-retryable.
+ * Used to determine if a token refresh failure should be retried based on the type of error.
+ *
+ * The default behavior is to retry all types of errors if no function is provided.
+ *
+ * Common use cases:
+ * - Network errors that may be transient (should retry)
+ * - Invalid credentials (should not retry)
+ * - Rate limiting responses (should retry)
+ *
+ * @param error - The error from the identity provider
+ * @param attempt - Current retry attempt (0-based)
+ * @returns `true` if the error is considered transient and the operation should be retried
+ *
+ * @example
+ * ```typescript
+ * const retryPolicy: RetryPolicy = {
+ * maxAttempts: 3,
+ * initialDelayMs: 1000,
+ * maxDelayMs: 5000,
+ * backoffMultiplier: 2,
+ * isRetryable: (error) => {
+ * // Retry on network errors or rate limiting
+ * return error instanceof NetworkError ||
+ * error instanceof RateLimitError;
+ * }
+ * };
+ * ```
+ */
+ isRetryable?: (error: unknown, attempt: number) => boolean;
+}
+
+/**
+ * The configuration for the TokenManager.
+ */
+export interface TokenManagerConfig {
+
+ /**
+ * Represents the ratio of a token's lifetime at which a refresh should be triggered.
+ * For example, a value of 0.75 means the token should be refreshed when 75% of its lifetime has elapsed (or when
+ * 25% of its lifetime remains).
+ */
+ expirationRefreshRatio: number;
+
+ // The retry policy for token refreshes. If not provided, no retries will be attempted.
+ retry?: RetryPolicy;
+}
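+
+// Illustrative note (not part of this change): with expirationRefreshRatio set to 0.8 and a
+// token whose TTL is 1000 ms, the next refresh is scheduled 800 ms after the token is received.
+//
+//   const config: TokenManagerConfig = { expirationRefreshRatio: 0.8 };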
+
+/**
+ * IDPError indicates a failure from the identity provider.
+ *
+ * The `isRetryable` flag is determined by the RetryPolicy's error classification function - if an error is
+ * classified as retryable, it will be marked as transient and the token manager will attempt to recover.
+ */
+export class IDPError extends Error {
+ constructor(public readonly message: string, public readonly isRetryable: boolean) {
+ super(message);
+ this.name = 'IDPError';
+ }
+}
+
+/**
+ * TokenStreamListener is an interface for objects that listen to token changes.
+ */
+export type TokenStreamListener<T> = {
+ /**
+ * Called each time a new token is received.
+ * @param token
+ */
+ onNext: (token: Token<T>) => void;
+
+ /**
+ * Called when an error occurs while calling the underlying IdentityProvider. The error can be
+ * transient and the token manager will attempt to obtain a token again if retry policy is configured.
+ *
+ * Only fatal errors will terminate the stream and stop the token manager.
+ *
+ * @param error
+ */
+ onError: (error: IDPError) => void;
+
+}
+
+/**
+ * TokenManager is responsible for obtaining/refreshing tokens and notifying listeners about token changes.
+ * It uses an IdentityProvider to request tokens. The token refresh is scheduled based on the token's TTL and
+ * the expirationRefreshRatio configuration.
+ *
+ * The TokenManager should be disposed when it is no longer needed by calling the dispose method on the Disposable
+ * returned by start.
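+ *
+ * @example
+ * ```ts
+ * // Illustrative usage (not part of this change); `myIdentityProvider` stands in for any
+ * // IdentityProvider<string> implementation.
+ * const manager = new TokenManager(myIdentityProvider, { expirationRefreshRatio: 0.8 });
+ * const disposable = manager.start({
+ *   onNext: token => console.log('received token', token.value),
+ *   onError: error => console.error('token stream error', error)
+ * });
+ * // ... later, when the manager is no longer needed
+ * disposable.dispose();
+ * ```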
+ */
+export class TokenManager<T> {
+ private currentToken: Token<T> | null = null;
+ private refreshTimeout: NodeJS.Timeout | null = null;
+ private listener: TokenStreamListener<T> | null = null;
+ private retryAttempt: number = 0;
+
+ constructor(
+ private readonly identityProvider: IdentityProvider<T>,
+ private readonly config: TokenManagerConfig
+ ) {
+ if (this.config.expirationRefreshRatio > 1) {
+ throw new Error('expirationRefreshRatio must be less than or equal to 1');
+ }
+ if (this.config.expirationRefreshRatio < 0) {
+ throw new Error('expirationRefreshRatio must be greater or equal to 0');
+ }
+ }
+
+ /**
+ * Starts the token manager and returns a Disposable that can be used to stop the token manager.
+ *
+ * @param listener The listener that will receive token updates.
+ * @param initialDelayMs The initial delay in milliseconds before the first token refresh.
+ */
+ public start(listener: TokenStreamListener<T>, initialDelayMs: number = 0): Disposable {
+ if (this.listener) {
+ this.stop();
+ }
+
+ this.listener = listener;
+ this.retryAttempt = 0;
+
+ this.scheduleNextRefresh(initialDelayMs);
+
+ return {
+ dispose: () => this.stop()
+ };
+ }
+
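+ /**
+ * Calculates the delay before the next retry attempt using exponential backoff.
+ *
+ * Illustrative note (not part of this change): with initialDelayMs = 100, backoffMultiplier = 2
+ * and no jitter, attempts 1..4 wait 100 ms, 200 ms, 400 ms, 800 ms, capped at maxDelayMs.
+ */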
+ public calculateRetryDelay(): number {
+ if (!this.config.retry) return 0;
+
+ const { initialDelayMs, maxDelayMs, backoffMultiplier, jitterPercentage } = this.config.retry;
+
+ let delay = initialDelayMs * Math.pow(backoffMultiplier, this.retryAttempt - 1);
+
+ delay = Math.min(delay, maxDelayMs);
+
+ if (jitterPercentage) {
+ const jitterRange = delay * (jitterPercentage / 100);
+ const jitterAmount = Math.random() * jitterRange - (jitterRange / 2);
+ delay += jitterAmount;
+ }
+
+ let result = Math.max(0, Math.floor(delay));
+
+ return result;
+ }
+
+ private shouldRetry(error: unknown): boolean {
+ if (!this.config.retry) return false;
+
+ const { maxAttempts, isRetryable } = this.config.retry;
+
+ if (this.retryAttempt >= maxAttempts) {
+ return false;
+ }
+
+ if (isRetryable) {
+ return isRetryable(error, this.retryAttempt);
+ }
+
+ return false;
+ }
+
+ public isRunning(): boolean {
+ return this.listener !== null;
+ }
+
+ private async refresh(): Promise<void> {
+ if (!this.listener) {
+ throw new Error('TokenManager is not running, but refresh was called');
+ }
+
+ try {
+ await this.identityProvider.requestToken().then(this.handleNewToken);
+ this.retryAttempt = 0;
+ } catch (error) {
+
+ if (this.shouldRetry(error)) {
+ this.retryAttempt++;
+ const retryDelay = this.calculateRetryDelay();
+ this.notifyError(`Token refresh failed (attempt ${this.retryAttempt}), retrying in ${retryDelay}ms: ${error}`, true)
+ this.scheduleNextRefresh(retryDelay);
+ } else {
+ this.notifyError(error, false);
+ this.stop();
+ }
+ }
+ }
+
+ private handleNewToken = async ({ token: nativeToken, ttlMs }: TokenResponse<T>): Promise<void> => {
+ if (!this.listener) {
+ throw new Error('TokenManager is not running, but a new token was received');
+ }
+ const token = this.wrapAndSetCurrentToken(nativeToken, ttlMs);
+ this.listener.onNext(token);
+
+ this.scheduleNextRefresh(this.calculateRefreshTime(token));
+ }
+
+ /**
+ * Creates a Token object from a native token and sets it as the current token.
+ *
+ * @param nativeToken - The raw token received from the identity provider
+ * @param ttlMs - Time-to-live in milliseconds for the token
+ *
+ * @returns A new Token instance containing the wrapped native token and expiration details
+ *
+ */
+ public wrapAndSetCurrentToken(nativeToken: T, ttlMs: number): Token<T> {
+ const now = Date.now();
+ const token = new Token(
+ nativeToken,
+ now + ttlMs,
+ now
+ );
+ this.currentToken = token;
+ return token;
+ }
+
+ private scheduleNextRefresh(delayMs: number): void {
+ if (this.refreshTimeout) {
+ clearTimeout(this.refreshTimeout);
+ this.refreshTimeout = null;
+ }
+ if (delayMs === 0) {
+ this.refresh();
+ } else {
+ this.refreshTimeout = setTimeout(() => this.refresh(), delayMs);
+ }
+
+ }
+
+ /**
+ * Calculates the time in milliseconds when the token should be refreshed
+ * based on the token's TTL and the expirationRefreshRatio configuration.
+ *
+ * @param token The token to calculate the refresh time for.
+ * @param now The current time in milliseconds. Defaults to Date.now().
+ */
+ public calculateRefreshTime(token: Token<T>, now: number = Date.now()): number {
+ const ttlMs = token.getTtlMs(now);
+ return Math.floor(ttlMs * this.config.expirationRefreshRatio);
+ }
+
+ private stop(): void {
+
+ if (this.refreshTimeout) {
+ clearTimeout(this.refreshTimeout);
+ this.refreshTimeout = null;
+ }
+
+ this.listener = null;
+ this.currentToken = null;
+ this.retryAttempt = 0;
+ }
+
+ /**
+ * Returns the current token or null if no token is available.
+ */
+ public getCurrentToken(): Token<T> | null {
+ return this.currentToken;
+ }
+
+ private notifyError(error: unknown, isRetryable: boolean): void {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+
+ if (!this.listener) {
+ throw new Error(`TokenManager is not running but received an error: ${errorMessage}`);
+ }
+
+ this.listener.onError(new IDPError(errorMessage, isRetryable));
+ }
+}
\ No newline at end of file
diff --git a/packages/client/lib/authx/token.ts b/packages/client/lib/authx/token.ts
new file mode 100644
index 00000000000..3d6e6867d84
--- /dev/null
+++ b/packages/client/lib/authx/token.ts
@@ -0,0 +1,23 @@
+/**
+ * A token that can be used to authenticate with a service.
+ */
+export class Token<T> {
+ constructor(
+ public readonly value: T,
+ //represents the token deadline - the time in milliseconds since the Unix epoch at which the token expires
+ public readonly expiresAtMs: number,
+ //represents the time in milliseconds since the Unix epoch at which the token was received
+ public readonly receivedAtMs: number
+ ) {}
+
+ /**
+ * Returns the time-to-live of the token in milliseconds.
+ * @param now The current time in milliseconds since the Unix epoch.
+ */
+ getTtlMs(now: number): number {
+ if (this.expiresAtMs < now) {
+ return 0;
+ }
+ return this.expiresAtMs - now;
+ }
+}
\ No newline at end of file
diff --git a/packages/client/lib/client/cache.spec.ts b/packages/client/lib/client/cache.spec.ts
new file mode 100644
index 00000000000..55f2672c26c
--- /dev/null
+++ b/packages/client/lib/client/cache.spec.ts
@@ -0,0 +1,700 @@
+import assert from "assert";
+import testUtils, { GLOBAL } from "../test-utils"
+import { BasicClientSideCache, BasicPooledClientSideCache, CacheStats } from "./cache"
+import { REDIS_FLUSH_MODES } from "../commands/FLUSHALL";
+import { once } from 'events';
+
+describe("Client Side Cache", () => {
+ describe('Basic Cache', () => {
+ const csc = new BasicClientSideCache({ maxEntries: 10 });
+
+ testUtils.testWithClient('Basic Cache Miss', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ await client.get("x");
+
+ assert.equal(csc.stats().missCount, 1, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClient('Basic Cache Hit', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ assert.equal(await client.get("x"), '1');
+ assert.equal(await client.get("x"), '1');
+
+ assert.equal(csc.stats().missCount, 1, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 1, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClient('Max Cache Entries', async client => {
+ csc.clear();
+
+ await client.set('1', 1);
+ assert.equal(await client.get('1'), '1');
+ assert.equal(await client.get('2'), null);
+ assert.equal(await client.get('3'), null);
+ assert.equal(await client.get('4'), null);
+ assert.equal(await client.get('5'), null);
+ assert.equal(await client.get('6'), null);
+ assert.equal(await client.get('7'), null);
+ assert.equal(await client.get('8'), null);
+ assert.equal(await client.get('9'), null);
+ assert.equal(await client.get('10'), null);
+ assert.equal(await client.get('11'), null);
+ assert.equal(await client.get('1'), '1');
+
+ assert.equal(csc.stats().missCount, 12, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClient('LRU works correctly', async client => {
+ csc.clear();
+
+ await client.set('1', 1);
+ assert.equal(await client.get('1'), '1');
+ assert.equal(await client.get('2'), null);
+ assert.equal(await client.get('3'), null);
+ assert.equal(await client.get('4'), null);
+ assert.equal(await client.get('5'), null);
+ assert.equal(await client.get('1'), '1');
+ assert.equal(await client.get('6'), null);
+ assert.equal(await client.get('7'), null);
+ assert.equal(await client.get('8'), null);
+ assert.equal(await client.get('9'), null);
+ assert.equal(await client.get('10'), null);
+ assert.equal(await client.get('11'), null);
+ assert.equal(await client.get('1'), '1');
+
+ assert.equal(csc.stats().missCount, 11, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 2, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClient('Basic Cache Clear', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ await client.get("x");
+ csc.clear();
+ await client.get("x");
+
+ assert.equal(csc.stats().missCount, 1, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClient('Null Invalidate acts as clear', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ await client.get("x");
+ csc.invalidate(null);
+ await client.get("x");
+
+ assert.equal(2, csc.stats().missCount, "Cache Misses");
+ assert.equal(0, csc.stats().hitCount, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClient('flushdb causes an invalidate null', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ assert.equal(await client.get("x"), '1');
+ await client.flushDb(REDIS_FLUSH_MODES.SYNC);
+ assert.equal(await client.get("x"), null);
+
+ assert.equal(csc.stats().missCount, 2, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClient('Basic Cache Invalidate', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ assert.equal(await client.get("x"), '1', 'first get');
+ await client.set("x", 2);
+ assert.equal(await client.get("x"), '2', 'second get');
+ await client.set("x", 3);
+ assert.equal(await client.get("x"), '3', 'third get');
+
+ assert.equal(csc.stats().missCount, 3, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClient("Cached Replies Don't Mutate", async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ await client.set('y', 2);
+ const ret1 = await client.mGet(['x', 'y']);
+ assert.deepEqual(ret1, ['1', '2'], 'first mGet');
+ ret1[0] = '4';
+ const ret2 = await client.mGet(['x', 'y']);
+ assert.deepEqual(ret2, ['1', '2'], 'second mGet');
+ ret2[0] = '8';
+ const ret3 = await client.mGet(['x', 'y']);
+ assert.deepEqual(ret3, ['1', '2'], 'third mGet');
+
+ assert.equal(csc.stats().missCount, 1, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 2, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClient("Cached cleared on disconnect", async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ await client.set('y', 2);
+ const ret1 = await client.mGet(['x', 'y']);
+ assert.deepEqual(ret1, ['1', '2'], 'first mGet');
+
+ assert.equal(csc.stats().missCount, 1, "first Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "first Cache Hits");
+
+ await client.close();
+
+ await client.connect();
+
+ const ret2 = await client.mGet(['x', 'y']);
+ assert.deepEqual(ret2, ['1', '2'], 'second mGet');
+
+ assert.equal(csc.stats().missCount, 1, "second Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "second Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+ });
+
+ describe("Pooled Cache", () => {
+ const csc = new BasicPooledClientSideCache();
+
+ testUtils.testWithClient('Virtual Pool Disconnect', async client1 => {
+ const client2 = client1.duplicate();
+ await client2.connect()
+
+ assert.equal(await client2.get("x"), null);
+ assert.equal(await client1.get("x"), null);
+
+ assert.equal(1, csc.stats().missCount, "Cache Misses");
+ assert.equal(1, csc.stats().hitCount, "Cache Hits");
+
+ await client2.close();
+
+ assert.equal(await client1.get("x"), null);
+ assert.equal(await client1.get("x"), null);
+
+ assert.equal(2, csc.stats().missCount, "Cache Misses");
+ assert.equal(2, csc.stats().hitCount, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+
+ testUtils.testWithClientPool('Basic Cache Miss and Clear', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ assert.equal(await client.get("x"), '1');
+
+ assert.equal(1, csc.stats().missCount, "Cache Misses");
+ assert.equal(0, csc.stats().hitCount, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ },
+ poolOptions: {
+ minimum: 5,
+ maximum: 5,
+ acquireTimeout: 0,
+ cleanupDelay: 1,
+ clientSideCache: csc
+ }
+ })
+
+ testUtils.testWithClientPool('Basic Cache Hit', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ assert.equal(await client.get("x"), '1');
+ assert.equal(await client.get("x"), '1');
+ assert.equal(await client.get("x"), '1');
+
+ assert.equal(csc.stats().missCount, 1, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 2, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ },
+ poolOptions: {
+ minimum: 5,
+ maximum: 5,
+ acquireTimeout: 0,
+ cleanupDelay: 1,
+ clientSideCache: csc
+ }
+ })
+
+ testUtils.testWithClientPool('Basic Cache Manually Invalidate', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+
+ assert.equal(await client.get("x"), '1', 'first get');
+
+ let p: Promise<Array<any>> = once(csc, 'invalidate');
+ await client.set("x", 2);
+ let [i] = await p;
+
+ assert.equal(await client.get("x"), '2', 'second get');
+
+ p = once(csc, 'invalidate');
+ await client.set("x", 3);
+ [i] = await p;
+
+ assert.equal(await client.get("x"), '3');
+
+ assert.equal(csc.stats().missCount, 3, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ },
+ poolOptions: {
+ minimum: 5,
+ maximum: 5,
+ acquireTimeout: 0,
+ cleanupDelay: 1,
+ clientSideCache: csc
+ }
+ })
+
+ testUtils.testWithClientPool('Basic Cache Invalidate via message', async client => {
+ csc.clear();
+
+ await client.set('x', 1);
+ await client.set('y', 2);
+
+ assert.deepEqual(await client.mGet(['x', 'y']), ['1', '2'], 'first mGet');
+
+ assert.equal(csc.stats().missCount, 1, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "Cache Hits");
+
+ let p: Promise<Array<any>> = once(csc, 'invalidate');
+ await client.set("x", 3);
+ let [i] = await p;
+
+ assert.equal(i, 'x');
+
+ assert.deepEqual(await client.mGet(['x', 'y']), ['3', '2'], 'second mGet');
+
+ assert.equal(csc.stats().missCount, 2, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "Cache Hits");
+
+ p = once(csc, 'invalidate');
+ await client.set("y", 4);
+ [i] = await p;
+
+ assert.equal(i, 'y');
+
+ assert.deepEqual(await client.mGet(['x', 'y']), ['3', '4'], 'third mGet');
+
+ assert.equal(csc.stats().missCount, 3, "Cache Misses");
+ assert.equal(csc.stats().hitCount, 0, "Cache Hits");
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ },
+ poolOptions: {
+ minimum: 5,
+ maximum: 5,
+ acquireTimeout: 0,
+ cleanupDelay: 1,
+ clientSideCache: csc
+ }
+ })
+ });
+
+ describe('Cluster Caching', () => {
+ const csc = new BasicPooledClientSideCache();
+
+ testUtils.testWithCluster('Basic Cache Miss and Clear', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ await client.get("x");
+ await client.set("y", 1);
+ await client.get("y");
+
+ assert.equal(2, csc.stats().missCount, "Cache Misses");
+ assert.equal(0, csc.stats().hitCount, "Cache Hits");
+ }, {
+ ...GLOBAL.CLUSTERS.OPEN,
+ clusterConfiguration: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ })
+
+ testUtils.testWithCluster('Basic Cache Hit', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ assert.equal(await client.get("x"), '1');
+ assert.equal(await client.get("x"), '1');
+ assert.equal(await client.get("x"), '1');
+ await client.set("y", 1);
+ assert.equal(await client.get("y"), '1');
+ assert.equal(await client.get("y"), '1');
+ assert.equal(await client.get("y"), '1');
+
+ assert.equal(2, csc.stats().missCount, "Cache Misses");
+ assert.equal(4, csc.stats().hitCount, "Cache Hits");
+ }, {
+ ...GLOBAL.CLUSTERS.OPEN,
+ clusterConfiguration: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ })
+
+ testUtils.testWithCluster('Basic Cache Invalidate', async client => {
+ csc.clear();
+
+ await client.set("x", 1);
+ assert.equal(await client.get("x"), '1');
+ await client.set("x", 2);
+ assert.equal(await client.get("x"), '2');
+ await client.set("x", 3);
+ assert.equal(await client.get("x"), '3');
+
+ await client.set("y", 1);
+ assert.equal(await client.get("y"), '1');
+ await client.set("y", 2);
+ assert.equal(await client.get("y"), '2');
+ await client.set("y", 3);
+ assert.equal(await client.get("y"), '3');
+
+ assert.equal(6, csc.stats().missCount, "Cache Misses");
+ assert.equal(0, csc.stats().hitCount, "Cache Hits");
+ }, {
+ ...GLOBAL.CLUSTERS.OPEN,
+ clusterConfiguration: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ })
+ });
+ describe("CacheStats", () => {
+ describe("CacheStats.of()", () => {
+ it("should correctly initialize stats and calculate derived values", () => {
+ const stats = CacheStats.of(10, 5, 8, 2, 100, 3);
+ assert.strictEqual(stats.hitCount, 10, "hitCount should be 10");
+ assert.strictEqual(stats.missCount, 5, "missCount should be 5");
+ assert.strictEqual(stats.loadSuccessCount, 8, "loadSuccessCount should be 8");
+ assert.strictEqual(stats.loadFailureCount, 2, "loadFailureCount should be 2");
+ assert.strictEqual(stats.totalLoadTime, 100, "totalLoadTime should be 100");
+ assert.strictEqual(stats.evictionCount, 3, "evictionCount should be 3");
+
+ assert.strictEqual(stats.requestCount(), 15, "requestCount should be 15 (10 hits + 5 misses)");
+ assert.strictEqual(stats.hitRate(), 10 / 15, "hitRate should be 10/15");
+ assert.strictEqual(stats.missRate(), 5 / 15, "missRate should be 5/15");
+ assert.strictEqual(stats.loadCount(), 10, "loadCount should be 10 (8 success + 2 failure)");
+ assert.strictEqual(stats.loadFailureRate(), 2 / 10, "loadFailureRate should be 2/10");
+ assert.strictEqual(stats.averageLoadPenalty(), 100 / 10, "averageLoadPenalty should be 10 (100 time / 10 loads)");
+ });
+
+ it("should handle zero values and division by zero for derived values", () => {
+ const stats = CacheStats.of(0, 0, 0, 0, 0, 0);
+ assert.strictEqual(stats.hitCount, 0, "hitCount");
+ assert.strictEqual(stats.missCount, 0, "missCount");
+ assert.strictEqual(stats.loadSuccessCount, 0, "loadSuccessCount");
+ assert.strictEqual(stats.loadFailureCount, 0, "loadFailureCount");
+ assert.strictEqual(stats.totalLoadTime, 0, "totalLoadTime");
+ assert.strictEqual(stats.evictionCount, 0, "evictionCount");
+
+ assert.strictEqual(stats.requestCount(), 0, "requestCount should be 0");
+ assert.strictEqual(stats.hitRate(), 1, "hitRate should be 1 for 0 requests");
+ assert.strictEqual(stats.missRate(), 0, "missRate should be 0 for 0 requests");
+ assert.strictEqual(stats.loadCount(), 0, "loadCount should be 0");
+ assert.strictEqual(stats.loadFailureRate(), 0, "loadFailureRate should be 0 for 0 loads");
+ assert.strictEqual(stats.averageLoadPenalty(), 0, "averageLoadPenalty should be 0 for 0 loads");
+ });
+ });
+
+ describe("CacheStats.empty()", () => {
+ it("should return stats with all zero counts and 0 for rates/penalties", () => {
+ const stats = CacheStats.empty();
+ assert.strictEqual(stats.hitCount, 0, "empty.hitCount");
+ assert.strictEqual(stats.missCount, 0, "empty.missCount");
+ assert.strictEqual(stats.loadSuccessCount, 0, "empty.loadSuccessCount");
+ assert.strictEqual(stats.loadFailureCount, 0, "empty.loadFailureCount");
+ assert.strictEqual(stats.totalLoadTime, 0, "empty.totalLoadTime");
+ assert.strictEqual(stats.evictionCount, 0, "empty.evictionCount");
+
+ assert.strictEqual(stats.requestCount(), 0, "empty.requestCount");
+ assert.strictEqual(stats.hitRate(), 1, "empty.hitRate should be 1");
+ assert.strictEqual(stats.missRate(), 0, "empty.missRate should be 0");
+ assert.strictEqual(stats.loadCount(), 0, "empty.loadCount");
+ assert.strictEqual(stats.loadFailureRate(), 0, "empty.loadFailureRate should be 0");
+ assert.strictEqual(stats.averageLoadPenalty(), 0, "empty.averageLoadPenalty should be 0");
+ });
+ });
+
+ describe("instance methods", () => {
+ const stats1 = CacheStats.of(10, 5, 8, 2, 100, 3);
+ const stats2 = CacheStats.of(20, 10, 12, 3, 200, 5);
+
+ describe("plus()", () => {
+ it("should correctly add two CacheStats instances", () => {
+ const sum = stats1.plus(stats2);
+ assert.strictEqual(sum.hitCount, 30);
+ assert.strictEqual(sum.missCount, 15);
+ assert.strictEqual(sum.loadSuccessCount, 20);
+ assert.strictEqual(sum.loadFailureCount, 5);
+ assert.strictEqual(sum.totalLoadTime, 300);
+ assert.strictEqual(sum.evictionCount, 8);
+ });
+
+ it("should correctly sum large numbers", () => {
+ const statsC = CacheStats.of(Number.MAX_VALUE, 1, 1, 1, 1, 1);
+ const statsD = CacheStats.of(Number.MAX_VALUE, 1, 1, 1, 1, 1);
+ const sum = statsC.plus(statsD);
+ assert.strictEqual(sum.hitCount, Infinity, "Summing MAX_VALUE should result in Infinity");
+ });
+ });
+
+ describe("minus()", () => {
+ it("should correctly subtract one CacheStats instance from another, flooring at 0", () => {
+ const diff = stats2.minus(stats1);
+ assert.strictEqual(diff.hitCount, 10);
+ assert.strictEqual(diff.missCount, 5);
+ assert.strictEqual(diff.loadSuccessCount, 4);
+ assert.strictEqual(diff.loadFailureCount, 1);
+ assert.strictEqual(diff.totalLoadTime, 100);
+ assert.strictEqual(diff.evictionCount, 2);
+ });
+
+ it("should floor results at 0 if minuend is smaller than subtrahend", () => {
+ const sSmall = CacheStats.of(5, 2, 1, 0, 10, 1);
+ const sLarge = CacheStats.of(10, 5, 2, 1, 20, 2);
+ const diff = sSmall.minus(sLarge);
+ assert.strictEqual(diff.hitCount, 0, "hitCount should be floored at 0 (5 - 10)");
+ assert.strictEqual(diff.missCount, 0, "missCount should be floored at 0 (2 - 5)");
+ assert.strictEqual(diff.loadSuccessCount, 0, "loadSuccessCount should be floored at 0 (1 - 2)");
+ assert.strictEqual(diff.loadFailureCount, 0, "loadFailureCount should be floored at 0 (0 - 1)");
+ assert.strictEqual(diff.totalLoadTime, 0, "totalLoadTime should be floored at 0 (10 - 20)");
+ assert.strictEqual(diff.evictionCount, 0, "evictionCount should be floored at 0 (1 - 2)");
+ });
+ });
+
+ describe("hitRate()", () => {
+ it("should return 0 if requestCount is 0", () => {
+ const stats = CacheStats.of(0, 0, 0, 0, 0, 0);
+ assert.strictEqual(stats.hitRate(), 1);
+ });
+ it("should return 0 if hitCount is 0 but missCount > 0", () => {
+ const stats = CacheStats.of(0, 1, 0, 0, 0, 0);
+ assert.strictEqual(stats.hitRate(), 0);
+ });
+ it("should return 1 if missCount is 0 but hitCount > 0", () => {
+ const stats = CacheStats.of(1, 0, 0, 0, 0, 0);
+ assert.strictEqual(stats.hitRate(), 1);
+ });
+ });
+
+ describe("missRate()", () => {
+ it("should return 0 if requestCount is 0", () => {
+ const stats = CacheStats.of(0, 0, 0, 0, 0, 0);
+ assert.strictEqual(stats.missRate(), 0);
+ });
+ it("should return 1 if hitCount is 0 but missCount > 0", () => {
+ const stats = CacheStats.of(0, 1, 0, 0, 0, 0);
+ assert.strictEqual(stats.missRate(), 1);
+ });
+ it("should return 0 if missCount is 0 but hitCount > 0", () => {
+ const stats = CacheStats.of(1, 0, 0, 0, 0, 0);
+ assert.strictEqual(stats.missRate(), 0);
+ });
+ });
+
+ describe("loadFailureRate()", () => {
+ it("should return 0 if loadCount is 0", () => {
+ const stats = CacheStats.of(0, 0, 0, 0, 0, 0);
+ assert.strictEqual(stats.loadFailureRate(), 0);
+ });
+ it("should return 0 if loadFailureCount is 0 but loadSuccessCount > 0", () => {
+ const stats = CacheStats.of(0, 0, 1, 0, 10, 0);
+ assert.strictEqual(stats.loadFailureRate(), 0);
+ });
+ it("should return 1 if loadSuccessCount is 0 but loadFailureCount > 0", () => {
+ const stats = CacheStats.of(0, 0, 0, 1, 10, 0);
+ assert.strictEqual(stats.loadFailureRate(), 1);
+ });
+ });
+
+ describe("averageLoadPenalty()", () => {
+ it("should return 0 if loadCount is 0, even if totalLoadTime > 0", () => {
+ const stats = CacheStats.of(0, 0, 0, 0, 100, 0);
+ assert.strictEqual(stats.averageLoadPenalty(), 0);
+ });
+ it("should return 0 if totalLoadTime is 0 and loadCount > 0", () => {
+ const stats = CacheStats.of(0, 0, 1, 1, 0, 0);
+ assert.strictEqual(stats.averageLoadPenalty(), 0);
+ });
+ });
+ });
+ });
+ it('should reflect comprehensive cache operations in stats via BasicClientSideCache', async function () {
+
+ const csc = new BasicClientSideCache({
+ maxEntries: 2, // Small size to easily trigger evictions
+ });
+
+ testUtils.testWithClient('comprehensive_stats_run', async client => {
+
+ // --- Phase 1: Initial misses and loads ---
+ await client.set('keyA', 'valueA_1');
+ assert.strictEqual(await client.get('keyA'), 'valueA_1', "Get keyA first time");
+ assert.strictEqual(csc.stats().missCount, 1);
+ assert.strictEqual(csc.stats().loadSuccessCount, 1);
+
+ await client.set('keyB', 'valueB_1');
+ assert.strictEqual(await client.get('keyB'), 'valueB_1', "Get keyB first time");
+ assert.strictEqual(csc.stats().missCount, 2);
+ assert.strictEqual(csc.stats().loadSuccessCount, 2);
+
+ // --- Phase 2: Cache hits ---
+ assert.strictEqual(await client.get('keyA'), 'valueA_1', "Get keyA second time (hit)");
+ assert.strictEqual(csc.stats().hitCount, 1);
+
+ assert.strictEqual(await client.get('keyB'), 'valueB_1', "Get keyB second time (hit)");
+ assert.strictEqual(csc.stats().hitCount, 2);
+
+
+ // --- Phase 3: Trigger evictions and more misses/loads ---
+ await client.set('keyC', 'valueC_1');
+ assert.strictEqual(await client.get('keyC'), 'valueC_1', "Get keyC first time (evicts keyA)");
+ assert.strictEqual(csc.stats().missCount, 3);
+ assert.strictEqual(csc.stats().loadSuccessCount, 3);
+ assert.strictEqual(csc.stats().evictionCount, 1);
+
+
+ assert.strictEqual(await client.get('keyA'), 'valueA_1', "Get keyA again (miss after eviction)");
+ assert.strictEqual(csc.stats().missCount, 4);
+ assert.strictEqual(csc.stats().loadSuccessCount, 4);
+ assert.strictEqual(csc.stats().evictionCount, 2);
+
+
+ // --- Phase 4: More hits ---
+ assert.strictEqual(await client.get('keyC'), 'valueC_1', "Get keyC again (hit)");
+ assert.strictEqual(csc.stats().hitCount, 3);
+
+ // --- Phase 5: Update a key (results in invalidation, then miss/load on next GET) ---
+ // Note: A SET operation on an existing cached key should invalidate it.
+ // The invalidation itself isn't directly a "hit" or "miss" for stats,
+ // but the *next* GET will be a miss.
+ await client.set('keyA', 'valueA_2');
+ assert.strictEqual(await client.get('keyA'), 'valueA_2', "Get keyA after SET (miss due to invalidation)");
+
+ assert.strictEqual(csc.stats().hitCount, 3);
+ assert.strictEqual(csc.stats().loadSuccessCount, 5);
+
+
+
+ const stats = csc.stats()
+
+ assert.strictEqual(stats.hitCount, 3, "Final hitCount");
+ assert.strictEqual(stats.missCount, 5, "Final missCount");
+ assert.strictEqual(stats.loadSuccessCount, 5, "Final loadSuccessCount");
+ assert.strictEqual(stats.loadFailureCount, 0, "Final loadFailureCount (expected 0 for this test)");
+ assert.strictEqual(stats.evictionCount, 2, "Final evictionCount");
+ assert.ok(stats.totalLoadTime >= 0, "Final totalLoadTime should be non-negative");
+
+ assert.strictEqual(stats.requestCount(), 8, "Final requestCount (5 misses + 3 hits)");
+ assert.strictEqual(stats.hitRate(), 3 / 8, "Final hitRate");
+ assert.strictEqual(stats.missRate(), 5 / 8, "Final missRate");
+
+ assert.strictEqual(stats.loadCount(), 5, "Final loadCount (5 success + 0 failure)");
+ assert.strictEqual(stats.loadFailureRate(), 0, "Final loadFailureRate (0 failures / 5 loads)");
+
+ if (stats.loadCount() > 0) {
+ assert.ok(stats.averageLoadPenalty() >= 0, "Final averageLoadPenalty should be non-negative");
+ assert.strictEqual(stats.averageLoadPenalty(), stats.totalLoadTime / stats.loadCount(), "Average load penalty calculation");
+ } else {
+ assert.strictEqual(stats.averageLoadPenalty(), 0, "Final averageLoadPenalty should be 0 if no loads");
+ }
+
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ RESP: 3,
+ clientSideCache: csc
+ }
+ });
+ });
+});
diff --git a/packages/client/lib/client/cache.ts b/packages/client/lib/client/cache.ts
new file mode 100644
index 00000000000..7254352ee8f
--- /dev/null
+++ b/packages/client/lib/client/cache.ts
@@ -0,0 +1,870 @@
+import { EventEmitter } from 'stream';
+import RedisClient from '.';
+import { RedisArgument, ReplyUnion, TransformReply, TypeMapping } from '../RESP/types';
+import { BasicCommandParser } from './parser';
+
+/**
+ * A snapshot of cache statistics.
+ *
+ * This class provides an immutable view of the cache's operational statistics at a particular
+ * point in time. It is heavily inspired by the statistics reporting capabilities found in
+ * Ben Manes's Caffeine cache (https://github.com/ben-manes/caffeine).
+ *
+ * Instances of `CacheStats` are typically obtained from a {@link StatsCounter} and can be used
+ * for performance monitoring, debugging, or logging. It includes metrics such as hit rate,
+ * miss rate, load success/failure rates, average load penalty, and eviction counts.
+ *
+ * All statistics are non-negative. Rates and averages are typically in the range `[0.0, 1.0]`,
+ * or `0` when the corresponding operation has not occurred (the hit rate is the exception: it is defined as `1.0` when there are no requests).
+ *
+ * Cache statistics are incremented according to specific rules:
+ * - When a cache lookup encounters an existing entry, hitCount is incremented.
+ * - When a cache lookup encounters a missing entry, missCount is incremented.
+ * - When a new entry is successfully loaded, loadSuccessCount is incremented and the
+ * loading time is added to totalLoadTime.
+ * - When an entry fails to load, loadFailureCount is incremented and the
+ * loading time is added to totalLoadTime.
+ * - When an entry is evicted due to size constraints or expiration,
+ * evictionCount is incremented.
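+ *
+ * @example
+ * // Illustrative only - build a snapshot and read its derived values
+ * // (the same numbers are exercised in the spec above):
+ * const stats = CacheStats.of(10, 5, 8, 2, 100, 3);
+ * stats.requestCount(); // 15
+ * stats.hitRate(); // 10 / 15
+ * stats.averageLoadPenalty(); // 100 / 10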
+ */
+export class CacheStats {
+ /**
+ * Creates a new CacheStats instance with the specified statistics.
+ */
+ private constructor(
+ public readonly hitCount: number,
+ public readonly missCount: number,
+ public readonly loadSuccessCount: number,
+ public readonly loadFailureCount: number,
+ public readonly totalLoadTime: number,
+ public readonly evictionCount: number
+ ) {
+ if (
+ hitCount < 0 ||
+ missCount < 0 ||
+ loadSuccessCount < 0 ||
+ loadFailureCount < 0 ||
+ totalLoadTime < 0 ||
+ evictionCount < 0
+ ) {
+ throw new Error('All statistics values must be non-negative');
+ }
+ }
+
+ /**
+ * Creates a new CacheStats instance with the specified statistics.
+ *
+ * @param hitCount - Number of cache hits
+ * @param missCount - Number of cache misses
+ * @param loadSuccessCount - Number of successful cache loads
+ * @param loadFailureCount - Number of failed cache loads
+ * @param totalLoadTime - Total load time in milliseconds
+ * @param evictionCount - Number of cache evictions
+ */
+ static of(
+ hitCount = 0,
+ missCount = 0,
+ loadSuccessCount = 0,
+ loadFailureCount = 0,
+ totalLoadTime = 0,
+ evictionCount = 0
+ ): CacheStats {
+ return new CacheStats(
+ hitCount,
+ missCount,
+ loadSuccessCount,
+ loadFailureCount,
+ totalLoadTime,
+ evictionCount
+ );
+ }
+
+ /**
+ * Returns a statistics instance where no cache events have been recorded.
+ *
+ * @returns An empty statistics instance
+ */
+ static empty(): CacheStats {
+ return CacheStats.EMPTY_STATS;
+ }
+
+ /**
+ * An empty stats instance with all counters set to zero.
+ */
+ private static readonly EMPTY_STATS = new CacheStats(0, 0, 0, 0, 0, 0);
+
+ /**
+ * Returns the total number of times cache lookup methods have returned
+ * either a cached or uncached value.
+ *
+ * @returns Total number of requests (hits + misses)
+ */
+ requestCount(): number {
+ return this.hitCount + this.missCount;
+ }
+
+ /**
+ * Returns the hit rate of the cache.
+ * This is defined as hitCount / requestCount, or 1.0 when requestCount is 0.
+ *
+ * @returns The ratio of cache requests that were hits (between 0.0 and 1.0)
+ */
+ hitRate(): number {
+ const requestCount = this.requestCount();
+ return requestCount === 0 ? 1.0 : this.hitCount / requestCount;
+ }
+
+ /**
+ * Returns the miss rate of the cache.
+ * This is defined as missCount / requestCount, or 0.0 when requestCount is 0.
+ *
+ * @returns The ratio of cache requests that were misses (between 0.0 and 1.0)
+ */
+ missRate(): number {
+ const requestCount = this.requestCount();
+ return requestCount === 0 ? 0.0 : this.missCount / requestCount;
+ }
+
+ /**
+ * Returns the total number of load operations (successful + failed).
+ *
+ * @returns Total number of load operations
+ */
+ loadCount(): number {
+ return this.loadSuccessCount + this.loadFailureCount;
+ }
+
+ /**
+ * Returns the ratio of cache loading attempts that failed.
+ * This is defined as loadFailureCount / loadCount, or 0.0 when loadCount is 0.
+ *
+ * @returns Ratio of load operations that failed (between 0.0 and 1.0)
+ */
+ loadFailureRate(): number {
+ const loadCount = this.loadCount();
+ return loadCount === 0 ? 0.0 : this.loadFailureCount / loadCount;
+ }
+
+ /**
+ * Returns the average time spent loading new values, in milliseconds.
+ * This is defined as totalLoadTime / loadCount, or 0.0 when loadCount is 0.
+ *
+ * @returns Average load time in milliseconds
+ */
+ averageLoadPenalty(): number {
+ const loadCount = this.loadCount();
+ return loadCount === 0 ? 0.0 : this.totalLoadTime / loadCount;
+ }
+
+ /**
+ * Returns a new CacheStats representing the difference between this CacheStats
+ * and another. Negative values are rounded up to zero.
+ *
+ * @param other - The statistics to subtract from this instance
+ * @returns The difference between this instance and other
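+ *
+ * @example
+ * // Sketch - compute the stats accumulated during an interval as the delta
+ * // of two snapshots (`cache` stands for any BasicClientSideCache instance):
+ * // const before = cache.stats();
+ * // ...traffic...
+ * // const delta = cache.stats().minus(before);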
+ */
+ minus(other: CacheStats): CacheStats {
+ return CacheStats.of(
+ Math.max(0, this.hitCount - other.hitCount),
+ Math.max(0, this.missCount - other.missCount),
+ Math.max(0, this.loadSuccessCount - other.loadSuccessCount),
+ Math.max(0, this.loadFailureCount - other.loadFailureCount),
+ Math.max(0, this.totalLoadTime - other.totalLoadTime),
+ Math.max(0, this.evictionCount - other.evictionCount)
+ );
+ }
+
+ /**
+ * Returns a new CacheStats representing the sum of this CacheStats and another.
+ *
+ * @param other - The statistics to add to this instance
+ * @returns The sum of this instance and other
+ */
+ plus(other: CacheStats): CacheStats {
+ return CacheStats.of(
+ this.hitCount + other.hitCount,
+ this.missCount + other.missCount,
+ this.loadSuccessCount + other.loadSuccessCount,
+ this.loadFailureCount + other.loadFailureCount,
+ this.totalLoadTime + other.totalLoadTime,
+ this.evictionCount + other.evictionCount
+ );
+ }
+}
+
+/**
+ * An accumulator for cache statistics.
+ *
+ * This interface defines the contract for objects that record cache-related events
+ * such as hits, misses, loads (successes and failures), and evictions. The design
+ * is inspired by the statistics collection mechanisms in Ben Manes's Caffeine cache
+ * (https://github.com/ben-manes/caffeine).
+ *
+ * Implementations of this interface are responsible for aggregating these events.
+ * A snapshot of the current statistics can be obtained by calling the `snapshot()`
+ * method, which returns an immutable {@link CacheStats} object.
+ *
+ * Common implementations include `DefaultStatsCounter` for active statistics collection
+ * and `DisabledStatsCounter` for a no-op version when stats are not needed.
+ */
+export interface StatsCounter {
+ /**
+ * Records cache hits. This should be called when a cache request returns a cached value.
+ *
+ * @param count - The number of hits to record
+ */
+ recordHits(count: number): void;
+
+ /**
+ * Records cache misses. This should be called when a cache request returns a value that was not
+ * found in the cache.
+ *
+ * @param count - The number of misses to record
+ */
+ recordMisses(count: number): void;
+
+ /**
+ * Records the successful load of a new entry. This method should be called when a cache request
+ * causes an entry to be loaded and the loading completes successfully.
+ *
+ * @param loadTime - The number of milliseconds the cache spent computing or retrieving the new value
+ */
+ recordLoadSuccess(loadTime: number): void;
+
+ /**
+ * Records the failed load of a new entry. This method should be called when a cache request
+ * causes an entry to be loaded, but an exception is thrown while loading the entry.
+ *
+ * @param loadTime - The number of milliseconds the cache spent computing or retrieving the new value
+ * prior to the failure
+ */
+ recordLoadFailure(loadTime: number): void;
+
+ /**
+ * Records the eviction of an entry from the cache. This should only be called when an entry is
+ * evicted due to the cache's eviction strategy, and not as a result of manual invalidations.
+ *
+ * @param count - The number of evictions to record
+ */
+ recordEvictions(count: number): void;
+
+ /**
+ * Returns a snapshot of this counter's values. Note that this may be an inconsistent view, as it
+ * may be interleaved with update operations.
+ *
+ * @return A snapshot of this counter's values
+ */
+ snapshot(): CacheStats;
+}
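+
+// Usage sketch: `BasicClientSideCache.prototype.stats()` (defined below) simply returns
+// `#statsCounter.snapshot()`, so callers typically read counters like this:
+// const s = csc.stats();
+// console.log(s.hitCount, s.missCount, s.hitRate());
+// where `csc` is the cache instance passed to the client (see the spec above).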
+
+/**
+ * A StatsCounter implementation that does nothing and always returns empty stats.
+ */
+class DisabledStatsCounter implements StatsCounter {
+ static readonly INSTANCE = new DisabledStatsCounter();
+
+ private constructor() { }
+
+ recordHits(count: number): void { }
+ recordMisses(count: number): void { }
+ recordLoadSuccess(loadTime: number): void { }
+ recordLoadFailure(loadTime: number): void { }
+ recordEvictions(count: number): void { }
+ snapshot(): CacheStats { return CacheStats.empty(); }
+}
+
+/**
+ * Returns a StatsCounter that does not record any cache events.
+ *
+ * @return A StatsCounter that does not record metrics
+ */
+function disabledStatsCounter(): StatsCounter {
+ return DisabledStatsCounter.INSTANCE;
+}
+
+/**
+ * A StatsCounter implementation that maintains cache statistics.
+ */
+class DefaultStatsCounter implements StatsCounter {
+ #hitCount = 0;
+ #missCount = 0;
+ #loadSuccessCount = 0;
+ #loadFailureCount = 0;
+ #totalLoadTime = 0;
+ #evictionCount = 0;
+
+ /**
+ * Records cache hits.
+ *
+ * @param count - The number of hits to record
+ */
+ recordHits(count: number): void {
+ this.#hitCount += count;
+ }
+
+ /**
+ * Records cache misses.
+ *
+ * @param count - The number of misses to record
+ */
+ recordMisses(count: number): void {
+ this.#missCount += count;
+ }
+
+ /**
+ * Records the successful load of a new entry.
+ *
+ * @param loadTime - The number of milliseconds spent loading the entry
+ */
+ recordLoadSuccess(loadTime: number): void {
+ this.#loadSuccessCount++;
+ this.#totalLoadTime += loadTime;
+ }
+
+ /**
+ * Records the failed load of a new entry.
+ *
+ * @param loadTime - The number of milliseconds spent attempting to load the entry
+ */
+ recordLoadFailure(loadTime: number): void {
+ this.#loadFailureCount++;
+ this.#totalLoadTime += loadTime;
+ }
+
+ /**
+ * Records cache evictions.
+ *
+ * @param count - The number of evictions to record
+ */
+ recordEvictions(count: number): void {
+ this.#evictionCount += count;
+ }
+
+ /**
+ * Returns a snapshot of the current statistics.
+ *
+ * @returns A snapshot of the current statistics
+ */
+ snapshot(): CacheStats {
+ return CacheStats.of(
+ this.#hitCount,
+ this.#missCount,
+ this.#loadSuccessCount,
+ this.#loadFailureCount,
+ this.#totalLoadTime,
+ this.#evictionCount
+ );
+ }
+
+ /**
+ * Creates a new DefaultStatsCounter.
+ *
+ * @returns A new DefaultStatsCounter instance
+ */
+ static create(): DefaultStatsCounter {
+ return new DefaultStatsCounter();
+ }
+}
+
+type CachingClient = RedisClient<any, any, any, any, any>;
+type CmdFunc = () => Promise<ReplyUnion>;
+
+type EvictionPolicy = "LRU" | "FIFO";
+
+/**
+ * Configuration options for Client Side Cache
+ */
+export interface ClientSideCacheConfig {
+ /**
+ * Time-to-live in milliseconds for cached entries.
+ * Use 0 for no expiration.
+ * @default 0
+ */
+ ttl?: number;
+
+ /**
+ * Maximum number of entries to store in the cache.
+ * Use 0 for unlimited entries.
+ * @default 0
+ */
+ maxEntries?: number;
+
+ /**
+ * Eviction policy to use when the cache reaches its capacity.
+ * - "LRU" (Least Recently Used): Evicts least recently accessed entries first
+ * - "FIFO" (First In First Out): Evicts oldest entries first
+ * @default "LRU"
+ */
+ evictPolicy?: EvictionPolicy;
+
+ /**
+ * Whether to collect statistics about cache operations.
+ * @default true
+ */
+ recordStats?: boolean;
+}
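+
+// Configuration sketch (mirrors the spec setup above): create a cache and hand it to a
+// RESP3 client via the `clientSideCache` option.
+// const csc = new BasicClientSideCache({ ttl: 0, maxEntries: 1000, evictPolicy: "LRU" });
+// const client = createClient({ RESP: 3, clientSideCache: csc });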
+
+interface CacheCreator {
+ epoch: number;
+ client: CachingClient;
+}
+
+interface ClientSideCacheEntry {
+ invalidate(): void;
+ validate(): boolean;
+}
+
+/**
+ * Generates a unique cache key from Redis command arguments
+ *
+ * @param redisArgs - Array of Redis command arguments
+ * @returns A unique string key for caching
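+ *
+ * @example
+ * // Argument lengths are written first, then the arguments themselves, joined with '_':
+ * // generateCacheKey(['GET', 'x']) === '3_1_GET_x'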
+ */
+function generateCacheKey(redisArgs: ReadonlyArray<RedisArgument>): string {
+ const tmp = new Array(redisArgs.length * 2);
+
+ for (let i = 0; i < redisArgs.length; i++) {
+ tmp[i] = redisArgs[i].length;
+ tmp[i + redisArgs.length] = redisArgs[i];
+ }
+
+ return tmp.join('_');
+}
+
+abstract class ClientSideCacheEntryBase implements ClientSideCacheEntry {
+ #invalidated = false;
+ readonly #expireTime: number;
+
+ constructor(ttl: number) {
+ if (ttl == 0) {
+ this.#expireTime = 0;
+ } else {
+ this.#expireTime = Date.now() + ttl;
+ }
+ }
+
+ invalidate(): void {
+ this.#invalidated = true;
+ }
+
+ validate(): boolean {
+ return !this.#invalidated && (this.#expireTime == 0 || (Date.now() < this.#expireTime))
+ }
+}
+
+class ClientSideCacheEntryValue extends ClientSideCacheEntryBase {
+ readonly #value: any;
+
+ get value() {
+ return this.#value;
+ }
+
+ constructor(ttl: number, value: any) {
+ super(ttl);
+ this.#value = value;
+ }
+}
+
+class ClientSideCacheEntryPromise extends ClientSideCacheEntryBase {
+ readonly #sendCommandPromise: Promise<ReplyUnion>;
+
+ get promise() {
+ return this.#sendCommandPromise;
+ }
+
+ constructor(ttl: number, sendCommandPromise: Promise<ReplyUnion>) {
+ super(ttl);
+ this.#sendCommandPromise = sendCommandPromise;
+ }
+}
+
+export abstract class ClientSideCacheProvider extends EventEmitter {
+ abstract handleCache(client: CachingClient, parser: BasicCommandParser, fn: CmdFunc, transformReply: TransformReply | undefined, typeMapping: TypeMapping | undefined): Promise<any>;
+ abstract trackingOn(): Array<RedisArgument>;
+ abstract invalidate(key: RedisArgument | null): void;
+ abstract clear(): void;
+ abstract stats(): CacheStats;
+ abstract onError(): void;
+ abstract onClose(): void;
+}
+
+export class BasicClientSideCache extends ClientSideCacheProvider {
+ #cacheKeyToEntryMap: Map<string, ClientSideCacheEntry>;
+ #keyToCacheKeySetMap: Map<string, Set<string>>;
+ readonly ttl: number;
+ readonly maxEntries: number;
+ readonly lru: boolean;
+ #statsCounter: StatsCounter;
+
+
+ recordEvictions(count: number): void {
+ this.#statsCounter.recordEvictions(count);
+ }
+
+ recordHits(count: number): void {
+ this.#statsCounter.recordHits(count);
+ }
+
+ recordMisses(count: number): void {
+ this.#statsCounter.recordMisses(count);
+ }
+
+ constructor(config?: ClientSideCacheConfig) {
+ super();
+
+ this.#cacheKeyToEntryMap = new Map<string, ClientSideCacheEntry>();
+ this.#keyToCacheKeySetMap = new Map<string, Set<string>>();
+ this.ttl = config?.ttl ?? 0;
+ this.maxEntries = config?.maxEntries ?? 0;
+ this.lru = config?.evictPolicy !== "FIFO";
+
+ const recordStats = config?.recordStats !== false;
+ this.#statsCounter = recordStats ? DefaultStatsCounter.create() : disabledStatsCounter();
+ }
+
+ /* logic of how caching works:
+
+ 1. commands use a CommandParser
+ it enables us to define/retrieve
+ cacheKey - a unique key that corresponds to this command and its arguments
+ redisKeys - an array of redis keys as strings; if any of these keys is modified, redis will invalidate this result when cached
+ 2. check if cacheKey is in our cache
+ 2b1. if it's a value cacheEntry - return it
+ 2b2. if it's a promise cacheEntry - wait on the promise and then continue at 3b
+ 3. if cacheEntry is not in cache
+ 3a. send the command, save the promise into a cacheEntry and then wait on the result
+ 3b. transform the reply (if required) based on transformReply
+ 3c. check that the cacheEntry is still valid (in cache and hasn't been deleted)
+ 3d. if valid - overwrite with a value entry
+ 4. return the previously non-cached result
+ */
+ override async handleCache(
+ client: CachingClient,
+ parser: BasicCommandParser,
+ fn: CmdFunc,
+ transformReply?: TransformReply,
+ typeMapping?: TypeMapping
+ ) {
+ let reply: ReplyUnion;
+
+ const cacheKey = generateCacheKey(parser.redisArgs);
+
+ // "2"
+ let cacheEntry = this.get(cacheKey);
+ if (cacheEntry) {
+ // If instanceof is "too slow", can add a "type" and then use an "as" cast to call proper getters.
+ if (cacheEntry instanceof ClientSideCacheEntryValue) { // "2b1"
+ this.#statsCounter.recordHits(1);
+
+ return structuredClone(cacheEntry.value);
+ } else if (cacheEntry instanceof ClientSideCacheEntryPromise) { // 2b2
+ // This counts as a miss since the value hasn't been fully loaded yet.
+ this.#statsCounter.recordMisses(1);
+ reply = await cacheEntry.promise;
+ } else {
+ throw new Error("unknown cache entry type");
+ }
+ } else { // 3/3a
+ this.#statsCounter.recordMisses(1);
+
+ const startTime = performance.now();
+ const promise = fn();
+
+ cacheEntry = this.createPromiseEntry(client, promise);
+ this.set(cacheKey, cacheEntry, parser.keys);
+
+ try {
+ reply = await promise;
+ const loadTime = performance.now() - startTime;
+ this.#statsCounter.recordLoadSuccess(loadTime);
+ } catch (err) {
+ const loadTime = performance.now() - startTime;
+ this.#statsCounter.recordLoadFailure(loadTime);
+
+ if (cacheEntry.validate()) {
+ this.delete(cacheKey!);
+ }
+
+ throw err;
+ }
+ }
+
+ // 3b
+ let val;
+ if (transformReply) {
+ val = transformReply(reply, parser.preserve, typeMapping);
+ } else {
+ val = reply;
+ }
+
+ // 3c
+ if (cacheEntry.validate()) { // revalidate the promise entry (don't save the value if the promise entry has been invalidated)
+ // 3d
+ cacheEntry = this.createValueEntry(client, val);
+ this.set(cacheKey, cacheEntry, parser.keys);
+ this.emit("cached-key", cacheKey);
+ } else {
+ // cache entry for key got invalidated between execution and saving, so not saving
+ }
+
+ return structuredClone(val);
+ }
+
+ override trackingOn() {
+ return ['CLIENT', 'TRACKING', 'ON'];
+ }
+
+ override invalidate(key: RedisArgument | null) {
+ if (key === null) {
+ this.clear(false);
+ this.emit("invalidate", key);
+
+ return;
+ }
+
+ const keySet = this.#keyToCacheKeySetMap.get(key.toString());
+ if (keySet) {
+ for (const cacheKey of keySet) {
+ const entry = this.#cacheKeyToEntryMap.get(cacheKey);
+ if (entry) {
+ entry.invalidate();
+ }
+ this.#cacheKeyToEntryMap.delete(cacheKey);
+ }
+ this.#keyToCacheKeySetMap.delete(key.toString());
+ }
+
+ this.emit('invalidate', key);
+ }
+
+ override clear(resetStats = true) {
+ const oldSize = this.#cacheKeyToEntryMap.size;
+ this.#cacheKeyToEntryMap.clear();
+ this.#keyToCacheKeySetMap.clear();
+
+ if (resetStats) {
+ if (!(this.#statsCounter instanceof DisabledStatsCounter)) {
+ this.#statsCounter = DefaultStatsCounter.create();
+ }
+ } else {
+ // If old entries were evicted due to clear, record them as evictions
+ if (oldSize > 0) {
+ this.#statsCounter.recordEvictions(oldSize);
+ }
+ }
+ }
+
+ get(cacheKey: string) {
+ const val = this.#cacheKeyToEntryMap.get(cacheKey);
+
+ if (val && !val.validate()) {
+ this.delete(cacheKey);
+ this.#statsCounter.recordEvictions(1);
+ this.emit("cache-evict", cacheKey);
+
+ return undefined;
+ }
+
+ if (val !== undefined && this.lru) {
+ this.#cacheKeyToEntryMap.delete(cacheKey);
+ this.#cacheKeyToEntryMap.set(cacheKey, val);
+ }
+
+ return val;
+ }
+
+ delete(cacheKey: string) {
+ const entry = this.#cacheKeyToEntryMap.get(cacheKey);
+ if (entry) {
+ entry.invalidate();
+ this.#cacheKeyToEntryMap.delete(cacheKey);
+ }
+ }
+
+ has(cacheKey: string) {
+ return this.#cacheKeyToEntryMap.has(cacheKey);
+ }
+
+ set(cacheKey: string, cacheEntry: ClientSideCacheEntry, keys: Array<RedisArgument>) {
+ let count = this.#cacheKeyToEntryMap.size;
+ const oldEntry = this.#cacheKeyToEntryMap.get(cacheKey);
+
+ if (oldEntry) {
+ count--; // overwriting, so not incrementing
+ oldEntry.invalidate();
+ }
+
+ if (this.maxEntries > 0 && count >= this.maxEntries) {
+ this.deleteOldest();
+ this.#statsCounter.recordEvictions(1);
+ }
+
+ this.#cacheKeyToEntryMap.set(cacheKey, cacheEntry);
+
+ for (const key of keys) {
+ if (!this.#keyToCacheKeySetMap.has(key.toString())) {
+ this.#keyToCacheKeySetMap.set(key.toString(), new Set());
+ }
+
+ const cacheKeySet = this.#keyToCacheKeySetMap.get(key.toString());
+ cacheKeySet!.add(cacheKey);
+ }
+ }
+
+ size() {
+ return this.#cacheKeyToEntryMap.size;
+ }
+
+ createValueEntry(client: CachingClient, value: any): ClientSideCacheEntryValue {
+ return new ClientSideCacheEntryValue(this.ttl, value);
+ }
+
+ createPromiseEntry(client: CachingClient, sendCommandPromise: Promise<ReplyUnion>): ClientSideCacheEntryPromise {
+ return new ClientSideCacheEntryPromise(this.ttl, sendCommandPromise);
+ }
+
+ override stats(): CacheStats {
+ return this.#statsCounter.snapshot();
+ }
+
+ override onError(): void {
+ this.clear();
+ }
+
+ override onClose() {
+ this.clear();
+ }
+
+ /**
+ * @internal
+ */
+ deleteOldest() {
+ const it = this.#cacheKeyToEntryMap[Symbol.iterator]();
+ const n = it.next();
+ if (!n.done) {
+ const key = n.value[0];
+ const entry = this.#cacheKeyToEntryMap.get(key);
+ if (entry) {
+ entry.invalidate();
+ }
+ this.#cacheKeyToEntryMap.delete(key);
+ }
+ }
+
+ /**
+ * Get cache entries for debugging
+ * @internal
+ */
+ entryEntries(): IterableIterator<[string, ClientSideCacheEntry]> {
+ return this.#cacheKeyToEntryMap.entries();
+ }
+
+ /**
+ * Get key set entries for debugging
+ * @internal
+ */
+ keySetEntries(): IterableIterator<[string, Set<string>]> {
+ return this.#keyToCacheKeySetMap.entries();
+ }
+}
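+
+// Observability sketch: the provider is an EventEmitter, so consumers can subscribe to the
+// events emitted above ("cached-key", "invalidate", "cache-evict"), e.g.:
+// csc.on('invalidate', key => console.log('server invalidated', key));
+// csc.on('cache-evict', cacheKey => console.log('evicted', cacheKey));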
+
+export abstract class PooledClientSideCacheProvider extends BasicClientSideCache {
+ #disabled = false;
+
+ disable(): void {
+ this.#disabled = true;
+ }
+
+ enable(): void {
+ this.#disabled = false;
+ }
+
+ override get(cacheKey: string): ClientSideCacheEntry | undefined {
+ if (this.#disabled) {
+ return undefined;
+ }
+
+ return super.get(cacheKey);
+ }
+
+ override has(cacheKey: string): boolean {
+ if (this.#disabled) {
+ return false;
+ }
+
+ return super.has(cacheKey);
+ }
+
+ onPoolClose(): void {
+ this.clear();
+ }
+}
+
+export class BasicPooledClientSideCache extends PooledClientSideCacheProvider {
+ override onError() {
+ this.clear(false);
+ }
+
+ override onClose() {
+ this.clear(false);
+ }
+}
+
+class PooledClientSideCacheEntryValue extends ClientSideCacheEntryValue {
+ #creator: CacheCreator;
+
+ constructor(ttl: number, creator: CacheCreator, value: any) {
+ super(ttl, value);
+
+ this.#creator = creator;
+ }
+
+ override validate(): boolean {
+ let ret = super.validate();
+ if (this.#creator) {
+ ret = ret && this.#creator.client.isReady && this.#creator.client.socketEpoch == this.#creator.epoch
+ }
+
+ return ret;
+ }
+}
+
+class PooledClientSideCacheEntryPromise extends ClientSideCacheEntryPromise {
+ #creator: CacheCreator;
+
+ constructor(ttl: number, creator: CacheCreator, sendCommandPromise: Promise<ReplyUnion>) {
+ super(ttl, sendCommandPromise);
+
+ this.#creator = creator;
+ }
+
+ override validate(): boolean {
+ let ret = super.validate();
+
+ return ret && this.#creator.client.isReady && this.#creator.client.socketEpoch == this.#creator.epoch
+ }
+}
+
+export class PooledNoRedirectClientSideCache extends BasicPooledClientSideCache {
+ override createValueEntry(client: CachingClient, value: any): ClientSideCacheEntryValue {
+ const creator = {
+ epoch: client.socketEpoch,
+ client: client
+ };
+
+ return new PooledClientSideCacheEntryValue(this.ttl, creator, value);
+ }
+
+ override createPromiseEntry(client: CachingClient, sendCommandPromise: Promise<ReplyUnion>): ClientSideCacheEntryPromise {
+ const creator = {
+ epoch: client.socketEpoch,
+ client: client
+ };
+
+ return new PooledClientSideCacheEntryPromise(this.ttl, creator, sendCommandPromise);
+ }
+
+ override onError() { }
+
+ override onClose() { }
+}
diff --git a/packages/client/lib/client/commands-queue.ts b/packages/client/lib/client/commands-queue.ts
new file mode 100644
index 00000000000..9b7f737113b
--- /dev/null
+++ b/packages/client/lib/client/commands-queue.ts
@@ -0,0 +1,544 @@
+import { DoublyLinkedNode, DoublyLinkedList, EmptyAwareSinglyLinkedList } from './linked-list';
+import encodeCommand from '../RESP/encoder';
+import { Decoder, PUSH_TYPE_MAPPING, RESP_TYPES } from '../RESP/decoder';
+import { TypeMapping, ReplyUnion, RespVersions, RedisArgument } from '../RESP/types';
+import { ChannelListeners, PubSub, PubSubCommand, PubSubListener, PubSubType, PubSubTypeListeners } from './pub-sub';
+import { AbortError, ErrorReply, CommandTimeoutDuringMaintenanceError, TimeoutError } from '../errors';
+import { MonitorCallback } from '.';
+import { dbgMaintenance } from './enterprise-maintenance-manager';
+
+export interface CommandOptions<T = TypeMapping> {
+ chainId?: symbol;
+ asap?: boolean;
+ abortSignal?: AbortSignal;
+ /**
+ * Maps between RESP and JavaScript types
+ */
+ typeMapping?: T;
+ /**
+ * Timeout for the command in milliseconds
+ */
+ timeout?: number;
+}
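+
+// Usage sketch: options are supplied per command to `addCommand` below, e.g.
+// queue.addCommand(['GET', 'key'], { timeout: 50 });
+// rejects with a TimeoutError if the command is still waiting to be written when the
+// 50ms timer fires (`queue` here stands for a RedisCommandsQueue instance).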
+
+export interface CommandToWrite extends CommandWaitingForReply {
+ args: ReadonlyArray<RedisArgument>;
+ chainId: symbol | undefined;
+ abort: {
+ signal: AbortSignal;
+ listener: () => unknown;
+ } | undefined;
+ timeout: {
+ signal: AbortSignal;
+ listener: () => unknown;
+ originalTimeout: number | undefined;
+ } | undefined;
+}
+
+interface CommandWaitingForReply {
+ resolve(reply?: unknown): void;
+ reject(err: unknown): void;
+ channelsCounter: number | undefined;
+ typeMapping: TypeMapping | undefined;
+}
+
+export type OnShardedChannelMoved = (channel: string, listeners: ChannelListeners) => void;
+
+const PONG = Buffer.from('pong'),
+ RESET = Buffer.from('RESET');
+
+const RESP2_PUSH_TYPE_MAPPING = {
+ ...PUSH_TYPE_MAPPING,
+ [RESP_TYPES.SIMPLE_STRING]: Buffer
+};
+
+// Try to handle a push notification. Return whether you
+// successfully consumed the notification or not. This is
+// important in order for the queue to be able to pass the
+// notification to another handler if the current one did not
+// succeed.
+type PushHandler = (pushItems: Array<any>) => boolean;
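+// Handler sketch: a push handler that consumes only RESP3 `invalidate` messages (as used
+// by client-side caching) and leaves everything else to the next handler in the chain:
+// queue.addPushHandler(push => {
+//   if (push[0]?.toString() !== 'invalidate') return false;
+//   // ...forward push[1] (the invalidated keys) to the cache...
+//   return true;
+// });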
+
+export default class RedisCommandsQueue {
+ readonly #respVersion;
+ readonly #maxLength;
+ readonly #toWrite = new DoublyLinkedList<CommandToWrite>();
+ readonly #waitingForReply = new EmptyAwareSinglyLinkedList<CommandWaitingForReply>();
+ readonly #onShardedChannelMoved;
+ #chainInExecution: symbol | undefined;
+ readonly decoder;
+ readonly #pubSub = new PubSub();
+
+ #pushHandlers: PushHandler[] = [this.#onPush.bind(this)];
+
+ #maintenanceCommandTimeout: number | undefined
+
+ setMaintenanceCommandTimeout(ms: number | undefined) {
+ // Prevent possible api misuse
+ if (this.#maintenanceCommandTimeout === ms) {
+ dbgMaintenance(`Queue already set maintenanceCommandTimeout to ${ms}, skipping`);
+ return;
+ };
+
+ dbgMaintenance(`Setting maintenance command timeout to ${ms}`);
+ this.#maintenanceCommandTimeout = ms;
+
+ if(this.#maintenanceCommandTimeout === undefined) {
+ dbgMaintenance(`Queue will keep maintenanceCommandTimeout for existing commands, just to be on the safe side. New commands will receive normal timeouts`);
+ return;
+ }
+
+ let counter = 0;
+ const total = this.#toWrite.length;
+
+ // Overwrite timeouts of all eligible toWrite commands
+ for(const node of this.#toWrite.nodes()) {
+ const command = node.value;
+
+ // Remove timeout listener if it exists
+ RedisCommandsQueue.#removeTimeoutListener(command)
+
+ counter++;
+ const newTimeout = this.#maintenanceCommandTimeout;
+
+ // Overwrite the command's timeout
+ const signal = AbortSignal.timeout(newTimeout);
+ command.timeout = {
+ signal,
+ listener: () => {
+ this.#toWrite.remove(node);
+ command.reject(new CommandTimeoutDuringMaintenanceError(newTimeout));
+ },
+ originalTimeout: command.timeout?.originalTimeout
+ };
+ signal.addEventListener('abort', command.timeout.listener, { once: true });
+ };
+ dbgMaintenance(`Total of ${counter} of ${total} timeouts reset to ${ms}`);
+ }
+
+ get isPubSubActive() {
+ return this.#pubSub.isActive;
+ }
+
+ constructor(
+ respVersion: RespVersions,
+ maxLength: number | null | undefined,
+ onShardedChannelMoved: OnShardedChannelMoved
+ ) {
+ this.#respVersion = respVersion;
+ this.#maxLength = maxLength;
+ this.#onShardedChannelMoved = onShardedChannelMoved;
+ this.decoder = this.#initiateDecoder();
+ }
+
+ #onReply(reply: ReplyUnion) {
+ this.#waitingForReply.shift()!.resolve(reply);
+ }
+
+ #onErrorReply(err: ErrorReply) {
+ this.#waitingForReply.shift()!.reject(err);
+ }
+
+ #onPush(push: Array<any>) {
+ // TODO: type
+ if (this.#pubSub.handleMessageReply(push)) return true;
+
+ const isShardedUnsubscribe = PubSub.isShardedUnsubscribe(push);
+ if (isShardedUnsubscribe && !this.#waitingForReply.length) {
+ const channel = push[1].toString();
+ this.#onShardedChannelMoved(
+ channel,
+ this.#pubSub.removeShardedListeners(channel)
+ );
+ return true;
+ } else if (isShardedUnsubscribe || PubSub.isStatusReply(push)) {
+ const head = this.#waitingForReply.head!.value;
+ if (
+ (Number.isNaN(head.channelsCounter!) && push[2] === 0) ||
+ --head.channelsCounter! === 0
+ ) {
+ this.#waitingForReply.shift()!.resolve();
+ }
+ return true;
+ }
+ return false
+ }
+
+ #getTypeMapping() {
+ return this.#waitingForReply.head!.value.typeMapping ?? {};
+ }
+
+ #initiateDecoder() {
+ return new Decoder({
+ onReply: reply => this.#onReply(reply),
+ onErrorReply: err => this.#onErrorReply(err),
+ //TODO: we can shave off a few cycles by not adding onPush handler at all if CSC is not used
+ onPush: push => {
+ for(const pushHandler of this.#pushHandlers) {
+ if(pushHandler(push)) return
+ }
+ },
+ getTypeMapping: () => this.#getTypeMapping()
+ });
+ }
+
+ addPushHandler(handler: PushHandler): void {
+ this.#pushHandlers.push(handler);
+ }
+
+ async waitForInflightCommandsToComplete(): Promise<void> {
+ // In-flight commands already completed
+ if(this.#waitingForReply.length === 0) {
+ return
+ };
+ // Otherwise wait for in-flight commands to fire `empty` event
+ return new Promise<void>(resolve => {
+ this.#waitingForReply.events.on('empty', resolve)
+ });
+ }
+
+ addCommand<T = ReplyUnion>(
+ args: ReadonlyArray<RedisArgument>,
+ options?: CommandOptions
+ ): Promise<T> {
+ if (this.#maxLength && this.#toWrite.length + this.#waitingForReply.length >= this.#maxLength) {
+ return Promise.reject(new Error('The queue is full'));
+ } else if (options?.abortSignal?.aborted) {
+ return Promise.reject(new AbortError());
+ }
+
+ return new Promise<T>((resolve, reject) => {
+ let node: DoublyLinkedNode<CommandToWrite>;
+ const value: CommandToWrite = {
+ args,
+ chainId: options?.chainId,
+ abort: undefined,
+ timeout: undefined,
+ resolve,
+ reject,
+ channelsCounter: undefined,
+ typeMapping: options?.typeMapping
+ };
+
+ // If #maintenanceCommandTimeout was explicitly set, we should
+ // use it instead of the timeout provided by the command
+ const timeout = this.#maintenanceCommandTimeout ?? options?.timeout;
+ const wasInMaintenance = this.#maintenanceCommandTimeout !== undefined;
+ if (timeout) {
+
+ const signal = AbortSignal.timeout(timeout);
+ value.timeout = {
+ signal,
+ listener: () => {
+ this.#toWrite.remove(node);
+ value.reject(wasInMaintenance ? new CommandTimeoutDuringMaintenanceError(timeout) : new TimeoutError());
+ },
+ originalTimeout: options?.timeout
+ };
+ signal.addEventListener('abort', value.timeout.listener, { once: true });
+ }
+
+ const signal = options?.abortSignal;
+ if (signal) {
+ value.abort = {
+ signal,
+ listener: () => {
+ this.#toWrite.remove(node);
+ value.reject(new AbortError());
+ }
+ };
+ signal.addEventListener('abort', value.abort.listener, { once: true });
+ }
+
+ node = this.#toWrite.add(value, options?.asap);
+ });
+ }
+
+ #addPubSubCommand(command: PubSubCommand, asap = false, chainId?: symbol) {
+ return new Promise<void>((resolve, reject) => {
+ this.#toWrite.add({
+ args: command.args,
+ chainId,
+ abort: undefined,
+ timeout: undefined,
+ resolve() {
+ command.resolve();
+ resolve();
+ },
+ reject(err) {
+ command.reject?.();
+ reject(err);
+ },
+ channelsCounter: command.channelsCounter,
+ typeMapping: PUSH_TYPE_MAPPING
+ }, asap);
+ });
+ }
+
+ #setupPubSubHandler() {
+ // RESP3 uses `onPush` to handle PubSub, so no need to modify `onReply`
+ if (this.#respVersion !== 2) return;
+
+ this.decoder.onReply = (reply => {
+ if (Array.isArray(reply)) {
+ if (this.#onPush(reply)) return;
+
+ if (PONG.equals(reply[0] as Buffer)) {
+ const { resolve, typeMapping } = this.#waitingForReply.shift()!,
+ buffer = ((reply[1] as Buffer).length === 0 ? reply[0] : reply[1]) as Buffer;
+ resolve(typeMapping?.[RESP_TYPES.SIMPLE_STRING] === Buffer ? buffer : buffer.toString());
+ return;
+ }
+ }
+
+ return this.#onReply(reply);
+ }) as Decoder['onReply'];
+ this.decoder.getTypeMapping = () => RESP2_PUSH_TYPE_MAPPING;
+ }
+
+ subscribe<T extends boolean>(
+ type: PubSubType,
+ channels: string | Array<string>,
+ listener: PubSubListener<T>,
+ returnBuffers?: T
+ ) {
+ const command = this.#pubSub.subscribe(type, channels, listener, returnBuffers);
+ if (!command) return;
+
+ this.#setupPubSubHandler();
+ return this.#addPubSubCommand(command);
+ }
+
+ #resetDecoderCallbacks() {
+ this.decoder.onReply = (reply => this.#onReply(reply)) as Decoder['onReply'];
+ this.decoder.getTypeMapping = () => this.#getTypeMapping();
+ }
+
+ unsubscribe<T extends boolean>(
+ type: PubSubType,
+ channels?: string | Array<string>,
+ listener?: PubSubListener<T>,
+ returnBuffers?: T
+ ) {
+ const command = this.#pubSub.unsubscribe(type, channels, listener, returnBuffers);
+ if (!command) return;
+
+ if (command && this.#respVersion === 2) {
+ // RESP2 modifies `onReply` to handle PubSub (see #setupPubSubHandler)
+ const { resolve } = command;
+ command.resolve = () => {
+ if (!this.#pubSub.isActive) {
+ this.#resetDecoderCallbacks();
+ }
+
+ resolve();
+ };
+ }
+
+ return this.#addPubSubCommand(command);
+ }
+
+ removeAllPubSubListeners() {
+ return this.#pubSub.removeAllListeners();
+ }
+
+ resubscribe(chainId?: symbol) {
+ const commands = this.#pubSub.resubscribe();
+ if (!commands.length) return;
+
+ this.#setupPubSubHandler();
+ return Promise.all(
+ commands.map(command => this.#addPubSubCommand(command, true, chainId))
+ );
+ }
+
+ extendPubSubChannelListeners(
+ type: PubSubType,
+ channel: string,
+ listeners: ChannelListeners
+ ) {
+ const command = this.#pubSub.extendChannelListeners(type, channel, listeners);
+ if (!command) return;
+
+ this.#setupPubSubHandler();
+ return this.#addPubSubCommand(command);
+ }
+
+ extendPubSubListeners(type: PubSubType, listeners: PubSubTypeListeners) {
+ const command = this.#pubSub.extendTypeListeners(type, listeners);
+ if (!command) return;
+
+ this.#setupPubSubHandler();
+ return this.#addPubSubCommand(command);
+ }
+
+ getPubSubListeners(type: PubSubType) {
+ return this.#pubSub.listeners[type];
+ }
+
+ monitor(callback: MonitorCallback, options?: CommandOptions) {
+ return new Promise((resolve, reject) => {
+ const typeMapping = options?.typeMapping ?? {};
+ this.#toWrite.add({
+ args: ['MONITOR'],
+ chainId: options?.chainId,
+ abort: undefined,
+ timeout: undefined,
+ // using `resolve` instead of using `.then`/`await` to make sure it'll be called before processing the next reply
+ resolve: () => {
+ // after running `MONITOR` only `MONITOR` and `RESET` replies are expected
+ // any other command should cause an error
+
+ // if `RESET` already overrides `onReply`, set monitor as its fallback
+ if (this.#resetFallbackOnReply) {
+ this.#resetFallbackOnReply = callback;
+ } else {
+ this.decoder.onReply = callback;
+ }
+
+ this.decoder.getTypeMapping = () => typeMapping;
+ resolve();
+ },
+ reject,
+ channelsCounter: undefined,
+ typeMapping
+ }, options?.asap);
+ });
+ }
+
+ resetDecoder() {
+ this.#resetDecoderCallbacks();
+ this.decoder.reset();
+ }
+
+ #resetFallbackOnReply?: Decoder['onReply'];
+
+ async reset<T extends TypeMapping>(chainId: symbol, typeMapping?: T) {
+ return new Promise((resolve, reject) => {
+ // overriding onReply to handle `RESET` while in `MONITOR` or PubSub mode
+ this.#resetFallbackOnReply = this.decoder.onReply;
+ this.decoder.onReply = (reply => {
+ if (
+ (typeof reply === 'string' && reply === 'RESET') ||
+ (reply instanceof Buffer && RESET.equals(reply))
+ ) {
+ this.#resetDecoderCallbacks();
+ this.#resetFallbackOnReply = undefined;
+ this.#pubSub.reset();
+
+ this.#waitingForReply.shift()!.resolve(reply);
+ return;
+ }
+
+ this.#resetFallbackOnReply!(reply);
+ }) as Decoder['onReply'];
+
+ this.#toWrite.push({
+ args: ['RESET'],
+ chainId,
+ abort: undefined,
+ timeout: undefined,
+ resolve,
+ reject,
+ channelsCounter: undefined,
+ typeMapping
+ });
+ });
+ }
+
+ isWaitingToWrite() {
+ return this.#toWrite.length > 0;
+ }
+
+ *commandsToWrite() {
+ let toSend = this.#toWrite.shift();
+ while (toSend) {
+ let encoded: ReadonlyArray<RedisArgument>
+ try {
+ encoded = encodeCommand(toSend.args);
+ } catch (err) {
+ toSend.reject(err);
+ toSend = this.#toWrite.shift();
+ continue;
+ }
+
+ // TODO reuse `toSend` or create new object?
+ (toSend as any).args = undefined;
+ if (toSend.abort) {
+ RedisCommandsQueue.#removeAbortListener(toSend);
+ toSend.abort = undefined;
+ }
+ if (toSend.timeout) {
+ RedisCommandsQueue.#removeTimeoutListener(toSend);
+ toSend.timeout = undefined;
+ }
+ this.#chainInExecution = toSend.chainId;
+ toSend.chainId = undefined;
+ this.#waitingForReply.push(toSend);
+
+ yield encoded;
+ toSend = this.#toWrite.shift();
+ }
+ }
+
+ #flushWaitingForReply(err: Error): void {
+ for (const node of this.#waitingForReply) {
+ node.reject(err);
+ }
+ this.#waitingForReply.reset();
+ }
+
+ static #removeAbortListener(command: CommandToWrite) {
+ command.abort!.signal.removeEventListener('abort', command.abort!.listener);
+ }
+
+ static #removeTimeoutListener(command: CommandToWrite) {
+ command.timeout?.signal.removeEventListener('abort', command.timeout!.listener);
+ }
+
+ static #flushToWrite(toBeSent: CommandToWrite, err: Error) {
+ if (toBeSent.abort) {
+ RedisCommandsQueue.#removeAbortListener(toBeSent);
+ }
+ if (toBeSent.timeout) {
+ RedisCommandsQueue.#removeTimeoutListener(toBeSent);
+ }
+
+ toBeSent.reject(err);
+ }
+
+ flushWaitingForReply(err: Error): void {
+ this.resetDecoder();
+ this.#pubSub.reset();
+
+ this.#flushWaitingForReply(err);
+
+ if (!this.#chainInExecution) return;
+
+ while (this.#toWrite.head?.value.chainId === this.#chainInExecution) {
+ RedisCommandsQueue.#flushToWrite(
+ this.#toWrite.shift()!,
+ err
+ );
+ }
+
+ this.#chainInExecution = undefined;
+ }
+
+ flushAll(err: Error): void {
+ this.resetDecoder();
+ this.#pubSub.reset();
+ this.#flushWaitingForReply(err);
+ for (const node of this.#toWrite) {
+ RedisCommandsQueue.#flushToWrite(node, err);
+ }
+ this.#toWrite.reset();
+ }
+
+ isEmpty() {
+ return (
+ this.#toWrite.length === 0 &&
+ this.#waitingForReply.length === 0
+ );
+ }
+}
diff --git a/packages/client/lib/client/enterprise-maintenance-manager.spec.ts b/packages/client/lib/client/enterprise-maintenance-manager.spec.ts
new file mode 100644
index 00000000000..59e2bfe8c0a
--- /dev/null
+++ b/packages/client/lib/client/enterprise-maintenance-manager.spec.ts
@@ -0,0 +1,49 @@
+import assert from "node:assert";
+import { createClient } from "../../";
+
+describe("EnterpriseMaintenanceManager does not prevent proper options parsing", () => {
+ it("should not throw when initializing without options", async () => {
+ const client = createClient();
+ assert.doesNotThrow(async () => {
+ //Expected to reject because there is no url or socket provided and there is no running server on localhost
+ await assert.rejects(client.connect);
+ });
+ });
+
+ it("should not throw when initializing without url/socket and with maint", async () => {
+ const client = createClient({
+ maintNotifications: "enabled",
+ RESP: 3,
+ });
+ assert.doesNotThrow(async () => {
+ //Expected to reject because there is no url or socket provided and there is no running server on localhost
+ await assert.rejects(client.connect);
+ });
+ });
+ it("should not throw when initializing with url and with maint", async () => {
+ const client = createClient({
+ maintNotifications: "enabled",
+ RESP: 3,
+ url: "redis://localhost:6379",
+ });
+ assert.doesNotThrow(async () => {
+ //Expected to reject because there is no running server on localhost:6379
+ await assert.rejects(client.connect);
+ });
+ });
+
+ it("should not throw when initializing with socket and with maint", async () => {
+ const client = createClient({
+ maintNotifications: "enabled",
+ RESP: 3,
+ socket: {
+ host: "localhost",
+ port: 6379,
+ },
+ });
+ assert.doesNotThrow(async () => {
+ //Expected to reject because there is no running server on localhost:6379
+ await assert.rejects(client.connect);
+ });
+ });
+});
diff --git a/packages/client/lib/client/enterprise-maintenance-manager.ts b/packages/client/lib/client/enterprise-maintenance-manager.ts
new file mode 100644
index 00000000000..9892a5be8a4
--- /dev/null
+++ b/packages/client/lib/client/enterprise-maintenance-manager.ts
@@ -0,0 +1,359 @@
+import { RedisClientOptions } from ".";
+import RedisCommandsQueue from "./commands-queue";
+import { RedisArgument } from "../..";
+import { isIP } from "net";
+import { lookup } from "dns/promises";
+import assert from "node:assert";
+import { setTimeout } from "node:timers/promises";
+import RedisSocket, { RedisTcpSocketOptions } from "./socket";
+import diagnostics_channel from "node:diagnostics_channel";
+
+export const MAINTENANCE_EVENTS = {
+ PAUSE_WRITING: "pause-writing",
+ RESUME_WRITING: "resume-writing",
+ TIMEOUTS_UPDATE: "timeouts-update",
+} as const;
+
+const PN = {
+ MOVING: "MOVING",
+ MIGRATING: "MIGRATING",
+ MIGRATED: "MIGRATED",
+ FAILING_OVER: "FAILING_OVER",
+ FAILED_OVER: "FAILED_OVER",
+};
+
+export type DiagnosticsEvent = {
+ type: string;
+ timestamp: number;
+ data?: Object;
+};
+
+export const dbgMaintenance = (...args: any[]) => {
+ if (!process.env.REDIS_DEBUG_MAINTENANCE) return;
+ return console.log("[MNT]", ...args);
+};
+
+export const emitDiagnostics = (event: DiagnosticsEvent) => {
+ if (!process.env.REDIS_EMIT_DIAGNOSTICS) return;
+
+ const channel = diagnostics_channel.channel("redis.maintenance");
+ channel.publish(event);
+};
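+
+// Consumer sketch: events are published on the "redis.maintenance" channel and can be
+// observed (when REDIS_EMIT_DIAGNOSTICS is set) with node's diagnostics_channel API:
+// diagnostics_channel.subscribe("redis.maintenance", (message) => {
+//   const event = message as DiagnosticsEvent;
+//   console.log(event.type, event.timestamp);
+// });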
+
+export interface MaintenanceUpdate {
+ relaxedCommandTimeout?: number;
+ relaxedSocketTimeout?: number;
+}
+
+interface Client {
+ _ejectSocket: () => RedisSocket;
+ _insertSocket: (socket: RedisSocket) => void;
+ _pause: () => void;
+ _unpause: () => void;
+ _maintenanceUpdate: (update: MaintenanceUpdate) => void;
+ duplicate: () => Client;
+ connect: () => Promise<unknown>;
+ destroy: () => void;
+ on: (event: string, callback: (value: unknown) => void) => void;
+}
+
+export default class EnterpriseMaintenanceManager {
+ #commandsQueue: RedisCommandsQueue;
+ #options: RedisClientOptions;
+ #isMaintenance = 0;
+ #client: Client;
+
+ static setupDefaultMaintOptions(options: RedisClientOptions) {
+ if (options.maintNotifications === undefined) {
+ options.maintNotifications =
+ options?.RESP === 3 ? "auto" : "disabled";
+ }
+ if (options.maintEndpointType === undefined) {
+ options.maintEndpointType = "auto";
+ }
+ if (options.maintRelaxedSocketTimeout === undefined) {
+ options.maintRelaxedSocketTimeout = 10000;
+ }
+ if (options.maintRelaxedCommandTimeout === undefined) {
+ options.maintRelaxedCommandTimeout = 10000;
+ }
+ }
+
+ static async getHandshakeCommand(
+ options: RedisClientOptions,
+ ): Promise<
+ | { cmd: Array<RedisArgument>; errorHandler: (error: Error) => void }
+ | undefined
+ > {
+ if (options.maintNotifications === "disabled") return;
+
+ const host = options.url
+ ? new URL(options.url).hostname
+ : (options.socket as RedisTcpSocketOptions | undefined)?.host;
+
+ if (!host) return;
+
+ const tls = options.socket?.tls ?? false
+
+ const movingEndpointType = await determineEndpoint(tls, host, options);
+ return {
+ cmd: [
+ "CLIENT",
+ "MAINT_NOTIFICATIONS",
+ "ON",
+ "moving-endpoint-type",
+ movingEndpointType,
+ ],
+ errorHandler: (error: Error) => {
+ dbgMaintenance("handshake failed:", error);
+ if (options.maintNotifications === "enabled") {
+ throw error;
+ }
+ },
+ };
+ }
+
+ constructor(
+ commandsQueue: RedisCommandsQueue,
+ client: Client,
+ options: RedisClientOptions,
+ ) {
+ this.#commandsQueue = commandsQueue;
+ this.#options = options;
+ this.#client = client;
+
+ this.#commandsQueue.addPushHandler(this.#onPush);
+ }
+
+ #onPush = (push: Array<any>): boolean => {
+ dbgMaintenance("ONPUSH:", push.map(String));
+
+ if (!Array.isArray(push) || !["MOVING", "MIGRATING", "MIGRATED", "FAILING_OVER", "FAILED_OVER"].includes(String(push[0]))) {
+ return false;
+ }
+
+ const type = String(push[0]);
+
+ emitDiagnostics({
+ type,
+ timestamp: Date.now(),
+ data: {
+ push: push.map(String),
+ },
+ });
+ switch (type) {
+ case PN.MOVING: {
+ // [ 'MOVING', '17', '15', '54.78.247.156:12075' ]
+ // ^seq ^after ^new ip
+ const afterSeconds = push[2];
+ const url: string | null = push[3] ? String(push[3]) : null;
+ dbgMaintenance("Received MOVING:", afterSeconds, url);
+ this.#onMoving(afterSeconds, url);
+ return true;
+ }
+ case PN.MIGRATING:
+ case PN.FAILING_OVER: {
+ dbgMaintenance("Received MIGRATING|FAILING_OVER");
+ this.#onMigrating();
+ return true;
+ }
+ case PN.MIGRATED:
+ case PN.FAILED_OVER: {
+ dbgMaintenance("Received MIGRATED|FAILED_OVER");
+ this.#onMigrated();
+ return true;
+ }
+ }
+ return false;
+ };
+
+ // Queue:
+ // toWrite [ C D E ]
+ // waitingForReply [ A B ] - aka In-flight commands
+ //
+ // time: ---1-2---3-4-5-6---------------------------
+ //
+ // 1. [EVENT] MOVING PN received
+ // 2. [ACTION] Pause writing ( we need to wait for new socket to connect and for all in-flight commands to complete )
+ // 3. [EVENT] New socket connected
+ // 4. [EVENT] In-flight commands completed
+ // 5. [ACTION] Destroy old socket
+ // 6. [ACTION] Resume writing -> we are going to write to the new socket from now on
+ #onMoving = async (
+ afterSeconds: number,
+ url: string | null,
+ ): Promise<void> => {
+ // 1 [EVENT] MOVING PN received
+ this.#onMigrating();
+
+ let host: string;
+ let port: number;
+
+ // The special value `none` indicates that the `MOVING` message doesn't need
+ // to contain an endpoint. Instead it contains the value `null` then. In
+ // such a corner case, the client is expected to schedule a graceful
+ // reconnect to its currently configured endpoint after half of the grace
+ // period that was communicated by the server is over.
+ if (url === null) {
+ assert(this.#options.maintEndpointType === "none");
+ assert(this.#options.socket !== undefined);
+ assert("host" in this.#options.socket);
+ assert(typeof this.#options.socket.host === "string");
+ host = this.#options.socket.host;
+ assert(typeof this.#options.socket.port === "number");
+ port = this.#options.socket.port;
+ const waitTime = (afterSeconds * 1000) / 2;
+ dbgMaintenance(`Wait for ${waitTime}ms`);
+ await setTimeout(waitTime);
+ } else {
+ const split = url.split(":");
+ host = split[0];
+ port = Number(split[1]);
+ }
+
+ // 2 [ACTION] Pause writing
+ dbgMaintenance("Pausing writing of new commands to old socket");
+ this.#client._pause();
+
+ dbgMaintenance("Creating new tmp client");
+ let start = performance.now();
+
+ // If a URL is provided, it takes precedence over the socket options;
+ // the options object is simply mutated in place
+ if(this.#options.url) {
+ const u = new URL(this.#options.url);
+ u.hostname = host;
+ u.port = String(port);
+ this.#options.url = u.toString();
+ } else {
+ this.#options.socket = {
+ ...this.#options.socket,
+ host,
+ port
+ }
+ }
+ const tmpClient = this.#client.duplicate();
+ tmpClient.on('error', (error: unknown) => {
+ //We don't know how to handle tmp client errors
+ dbgMaintenance(`[ERR]`, error)
+ });
+ dbgMaintenance(`Tmp client created in ${( performance.now() - start ).toFixed(2)}ms`);
+ dbgMaintenance(
+ `Set timeout for tmp client to ${this.#options.maintRelaxedSocketTimeout}`,
+ );
+ tmpClient._maintenanceUpdate({
+ relaxedCommandTimeout: this.#options.maintRelaxedCommandTimeout,
+ relaxedSocketTimeout: this.#options.maintRelaxedSocketTimeout,
+ });
+ dbgMaintenance(`Connecting tmp client: ${host}:${port}`);
+ start = performance.now();
+ await tmpClient.connect();
+ dbgMaintenance(`Connected to tmp client in ${(performance.now() - start).toFixed(2)}ms`);
+ // 3 [EVENT] New socket connected
+
+ dbgMaintenance(`Wait for all in-flight commands to complete`);
+ await this.#commandsQueue.waitForInflightCommandsToComplete();
+ dbgMaintenance(`In-flight commands completed`);
+ // 4 [EVENT] In-flight commands completed
+
+ dbgMaintenance("Swap client sockets...");
+ const oldSocket = this.#client._ejectSocket();
+ const newSocket = tmpClient._ejectSocket();
+ this.#client._insertSocket(newSocket);
+ tmpClient._insertSocket(oldSocket);
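+ // The tmp client now owns the old socket, so destroying it below also tears down the old connection.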
+ tmpClient.destroy();
+ dbgMaintenance("Swap client sockets done.");
+ // 5 + 6
+ dbgMaintenance("Resume writing");
+ this.#client._unpause();
+ this.#onMigrated();
+ };
+
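+ // #isMaintenance counts overlapping maintenance events: the first MIGRATING/MOVING
+ // relaxes the timeouts, and they are only restored once the counter drops back to zero.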
+ #onMigrating = () => {
+ this.#isMaintenance++;
+ if (this.#isMaintenance > 1) {
+ dbgMaintenance(`Timeout relaxation already done`);
+ return;
+ }
+
+ const update: MaintenanceUpdate = {
+ relaxedCommandTimeout: this.#options.maintRelaxedCommandTimeout,
+ relaxedSocketTimeout: this.#options.maintRelaxedSocketTimeout,
+ };
+
+ this.#client._maintenanceUpdate(update);
+ };
+
+ #onMigrated = () => {
+ // Ensure that #isMaintenance doesn't go below 0
+ this.#isMaintenance = Math.max(this.#isMaintenance - 1, 0);
+ if (this.#isMaintenance > 0) {
+ dbgMaintenance(`Not ready to unrelax timeouts yet`);
+ return;
+ }
+
+ const update: MaintenanceUpdate = {
+ relaxedCommandTimeout: undefined,
+ relaxedSocketTimeout: undefined
+ };
+
+ this.#client._maintenanceUpdate(update);
+ };
+}
+
+export type MovingEndpointType =
+ | "auto"
+ | "internal-ip"
+ | "internal-fqdn"
+ | "external-ip"
+ | "external-fqdn"
+ | "none";
+
+function isPrivateIP(ip: string): boolean {
+ const version = isIP(ip);
+ if (version === 4) {
+ const octets = ip.split(".").map(Number);
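+ // RFC 1918 private ranges: 10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16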
+ return (
+ octets[0] === 10 ||
+ (octets[0] === 172 && octets[1] >= 16 && octets[1] <= 31) ||
+ (octets[0] === 192 && octets[1] === 168)
+ );
+ }
+ if (version === 6) {
+ return (
+ ip.startsWith("fc") || // Unique local
+ ip.startsWith("fd") || // Unique local
+ ip === "::1" || // Loopback
+ ip.startsWith("fe80") // Link-local unicast
+ );
+ }
+ return false;
+}
+
+async function determineEndpoint(
+ tlsEnabled: boolean,
+ host: string,
+ options: RedisClientOptions,
+ ): Promise<MovingEndpointType> {
+ assert(options.maintEndpointType !== undefined);
+ if (options.maintEndpointType !== "auto") {
+ dbgMaintenance(
+ `Determine endpoint type: ${options.maintEndpointType}`,
+ );
+ return options.maintEndpointType;
+ }
+
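+ // Resolve hostnames to an IP address (family 0 = IPv4 or IPv6) so the address can be classified as private or public.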
+ const ip = isIP(host) ? host : (await lookup(host, { family: 0 })).address;
+
+ const isPrivate = isPrivateIP(ip);
+
+ let result: MovingEndpointType;
+ if (tlsEnabled) {
+ result = isPrivate ? "internal-fqdn" : "external-fqdn";
+ } else {
+ result = isPrivate ? "internal-ip" : "external-ip";
+ }
+
+ dbgMaintenance(`Determine endpoint type: ${result}`);
+ return result;
+}
diff --git a/packages/client/lib/client/index.spec.ts b/packages/client/lib/client/index.spec.ts
new file mode 100644
index 00000000000..d7ce00f38ae
--- /dev/null
+++ b/packages/client/lib/client/index.spec.ts
@@ -0,0 +1,1031 @@
+import { strict as assert } from 'node:assert';
+import testUtils, { GLOBAL, waitTillBeenCalled } from '../test-utils';
+import RedisClient, { RedisClientOptions, RedisClientType } from '.';
+import { AbortError, ClientClosedError, ClientOfflineError, ConnectionTimeoutError, DisconnectsClientError, ErrorReply, MultiErrorReply, TimeoutError, WatchError } from '../errors';
+import { defineScript } from '../lua-script';
+import { spy, stub } from 'sinon';
+import { once } from 'node:events';
+import { MATH_FUNCTION, loadMathFunction } from '../commands/FUNCTION_LOAD.spec';
+import { RESP_TYPES } from '../RESP/decoder';
+import { BlobStringReply, NumberReply } from '../RESP/types';
+import { SortedSetMember } from '../commands/generic-transformers';
+import { CommandParser } from './parser';
+
+export const SQUARE_SCRIPT = defineScript({
+ SCRIPT:
+ `local number = redis.call('GET', KEYS[1])
+ return number * number`,
+ NUMBER_OF_KEYS: 1,
+ FIRST_KEY_INDEX: 0,
+ parseCommand(parser: CommandParser, key: string) {
+ parser.pushKey(key);
+ },
+ transformReply: undefined as unknown as () => NumberReply
+});
+
+describe('Client', () => {
+ describe('initialization', () => {
+ describe('clientSideCache validation', () => {
+ const clientSideCacheConfig = { ttl: 0, maxEntries: 0 };
+
+ it('should throw error when clientSideCache is enabled with RESP 2', () => {
+ assert.throws(
+ () => new RedisClient({
+ clientSideCache: clientSideCacheConfig,
+ RESP: 2,
+ }),
+ new Error('Client Side Caching is only supported with RESP3')
+ );
+ });
+
+ it('should throw error when clientSideCache is enabled with RESP undefined', () => {
+ assert.throws(
+ () => new RedisClient({
+ clientSideCache: clientSideCacheConfig,
+ }),
+ new Error('Client Side Caching is only supported with RESP3')
+ );
+ });
+
+ it('should not throw when clientSideCache is enabled with RESP 3', () => {
+ assert.doesNotThrow(() =>
+ new RedisClient({
+ clientSideCache: clientSideCacheConfig,
+ RESP: 3,
+ })
+ );
+ });
+ });
+ });
+
+ describe('parseURL', () => {
+ it('redis://user:secret@localhost:6379/0', async () => {
+ const result = RedisClient.parseURL('redis://user:secret@localhost:6379/0');
+ const expected: RedisClientOptions = {
+ socket: {
+ host: 'localhost',
+ port: 6379,
+ tls: false
+ },
+ username: 'user',
+ password: 'secret',
+ database: 0,
+ credentialsProvider: {
+ type: 'async-credentials-provider',
+ credentials: async () => ({
+ password: 'secret',
+ username: 'user'
+ })
+ }
+ };
+
+ // Compare everything except the credentials function
+ const { credentialsProvider: resultCredProvider, ...resultRest } = result;
+ const { credentialsProvider: expectedCredProvider, ...expectedRest } = expected;
+
+ // Compare non-function properties
+ assert.deepEqual(resultRest, expectedRest);
+
+ if (result?.credentialsProvider?.type === 'async-credentials-provider'
+ && expected?.credentialsProvider?.type === 'async-credentials-provider') {
+
+ // Compare the actual output of the credentials functions
+ const resultCreds = await result.credentialsProvider?.credentials();
+ const expectedCreds = await expected.credentialsProvider?.credentials();
+ assert.deepEqual(resultCreds, expectedCreds);
+ } else {
+ assert.fail('Credentials provider type mismatch');
+ }
+
+
+ });
+
+ it('rediss://user:secret@localhost:6379/0', async () => {
+ const result = RedisClient.parseURL('rediss://user:secret@localhost:6379/0');
+ const expected: RedisClientOptions = {
+ socket: {
+ host: 'localhost',
+ port: 6379,
+ tls: true
+ },
+ username: 'user',
+ password: 'secret',
+ database: 0,
+ credentialsProvider: {
+ credentials: async () => ({
+ password: 'secret',
+ username: 'user'
+ }),
+ type: 'async-credentials-provider'
+ }
+ };
+
+ // Compare everything except the credentials function
+ const { credentialsProvider: resultCredProvider, ...resultRest } = result;
+ const { credentialsProvider: expectedCredProvider, ...expectedRest } = expected;
+
+ // Compare non-function properties
+ assert.deepEqual(resultRest, expectedRest);
+ assert.equal(resultCredProvider?.type, expectedCredProvider?.type);
+
+ if (result?.credentialsProvider?.type === 'async-credentials-provider' &&
+ expected?.credentialsProvider?.type === 'async-credentials-provider') {
+
+ // Compare the actual output of the credentials functions
+ const resultCreds = await result.credentialsProvider.credentials();
+ const expectedCreds = await expected.credentialsProvider.credentials();
+ assert.deepEqual(resultCreds, expectedCreds);
+
+ } else {
+ assert.fail('Credentials provider type mismatch');
+ }
+
+ })
+
+ it('Invalid protocol', () => {
+ assert.throws(
+ () => RedisClient.parseURL('redi://user:secret@localhost:6379/0'),
+ TypeError
+ );
+ });
+
+ it('Invalid pathname', () => {
+ assert.throws(
+ () => RedisClient.parseURL('redis://user:secret@localhost:6379/NaN'),
+ TypeError
+ );
+ });
+
+ it('redis://localhost', () => {
+ assert.deepEqual(
+ RedisClient.parseURL('redis://localhost'),
+ {
+ socket: {
+ host: 'localhost',
+ tls: false
+ }
+ }
+ );
+ });
+
+ it('DB in URL should be parsed', async () => {
+ const client = RedisClient.create({
+ url: 'redis://user:secret@localhost:6379/5'
+ });
+
+ assert.equal(client?.options?.database, 5);
+ })
+ });
+
+ describe('parseOptions', () => {
+ it('should throw error if tls socket option is set to true and the url protocol is "redis:"', () => {
+ assert.throws(
+ () => RedisClient.parseOptions({
+ url: 'redis://localhost',
+ socket: {
+ tls: true
+ }
+ }),
+ TypeError
+ );
+ });
+ it('should throw error if tls socket option is set to false and the url protocol is "rediss:"', () => {
+ assert.throws(
+ () => RedisClient.parseOptions({
+ url: 'rediss://localhost',
+ socket: {
+ tls: false
+ }
+ }),
+ TypeError
+ );
+ });
+ it('should not throw when tls socket option and url protocol match', () => {
+ assert.equal(
+ RedisClient.parseOptions({
+ url: 'rediss://localhost',
+ socket: {
+ tls: true
+ }
+ }).socket.tls,
+ true
+ );
+ assert.equal(
+ RedisClient.parseOptions({
+ url: 'redis://localhost',
+ socket: {
+ tls: false
+ }
+ }).socket.tls,
+ false
+ );
+ });
+ });
+
+ describe('authentication', () => {
+ testUtils.testWithClient('Client should be authenticated', async client => {
+ assert.equal(
+ await client.ping(),
+ 'PONG'
+ );
+ }, GLOBAL.SERVERS.PASSWORD);
+
+ testUtils.testWithClient('Client can authenticate asynchronously ', async client => {
+ assert.equal(
+ await client.ping(),
+ 'PONG'
+ );
+ }, GLOBAL.SERVERS.ASYNC_BASIC_AUTH);
+
+ testUtils.testWithClient('Client can authenticate using the streaming credentials provider for initial token acquisition',
+ async client => {
+ assert.equal(
+ await client.ping(),
+ 'PONG'
+ );
+ }, GLOBAL.SERVERS.STREAMING_AUTH);
+
+ testUtils.testWithClient('should execute AUTH before SELECT', async client => {
+ assert.equal(
+ (await client.clientInfo()).db,
+ 2
+ );
+ }, {
+ ...GLOBAL.SERVERS.PASSWORD,
+ clientOptions: {
+ ...GLOBAL.SERVERS.PASSWORD.clientOptions,
+ database: 2
+ },
+ minimumDockerVersion: [6, 2]
+ });
+ });
+
+ testUtils.testWithClient('should set connection name', async client => {
+ assert.equal(
+ await client.clientGetName(),
+ 'name'
+ );
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ name: 'name'
+ }
+ });
+
+ // TODO: fix & uncomment
+ // testUtils.testWithClient('connect, ready and end events', async client => {
+ // await Promise.all([
+ // once(client, 'connect'),
+ // once(client, 'ready'),
+ // client.connect()
+ // ]);
+
+ // await Promise.all([
+ // once(client, 'end'),
+ // client.close()
+ // ]);
+ // }, {
+ // ...GLOBAL.SERVERS.OPEN,
+ // disableClientSetup: true
+ // });
+
+ describe('sendCommand', () => {
+ testUtils.testWithClient('PING', async client => {
+ assert.equal(await client.sendCommand(['PING']), 'PONG');
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('Unactivated AbortController should not abort', async client => {
+ await client.sendCommand(['PING'], {
+ abortSignal: new AbortController().signal
+ });
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('AbortError', async client => {
+ await blockSetImmediate(async () => {
+ await assert.rejects(client.sendCommand(['PING'], {
+ abortSignal: AbortSignal.timeout(5)
+ }), AbortError);
+ })
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('Timeout with custom timeout config', async client => {
+ await blockSetImmediate(async () => {
+ await assert.rejects(client.sendCommand(['PING'], {
+ timeout: 5
+ }), TimeoutError);
+ })
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithCluster('Timeout with custom timeout config (cluster)', async cluster => {
+ await blockSetImmediate(async () => {
+ await assert.rejects(cluster.sendCommand(undefined, true, ['PING'], {
+ timeout: 5
+ }), TimeoutError);
+ })
+ }, GLOBAL.CLUSTERS.OPEN);
+
+ testUtils.testWithClientSentinel('Timeout with custom timeout config (sentinel)', async sentinel => {
+ await blockSetImmediate(async () => {
+ await assert.rejects(sentinel.sendCommand(true, ['PING'], {
+ timeout: 5
+ }), TimeoutError);
+ })
+ }, GLOBAL.SENTINEL.OPEN);
+
+ testUtils.testWithClient('Timeout with global timeout config', async client => {
+ await blockSetImmediate(async () => {
+ await assert.rejects(client.ping(), TimeoutError);
+ await assert.rejects(client.sendCommand(['PING']), TimeoutError);
+ });
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ commandOptions: {
+ timeout: 5
+ }
+ }
+ });
+
+ testUtils.testWithCluster('Timeout with global timeout config (cluster)', async cluster => {
+ await blockSetImmediate(async () => {
+ await assert.rejects(cluster.HSET('key', 'foo', 'value'), TimeoutError);
+ await assert.rejects(cluster.sendCommand(undefined, true, ['PING']), TimeoutError);
+ });
+ }, {
+ ...GLOBAL.CLUSTERS.OPEN,
+ clusterConfiguration: {
+ commandOptions: {
+ timeout: 5
+ }
+ }
+ });
+
+ testUtils.testWithClientSentinel('Timeout with global timeout config (sentinel)', async sentinel => {
+ await blockSetImmediate(async () => {
+ await assert.rejects(sentinel.HSET('key', 'foo', 'value'), TimeoutError);
+ await assert.rejects(sentinel.sendCommand(true, ['PING']), TimeoutError);
+ });
+ }, {
+ ...GLOBAL.SENTINEL.OPEN,
+ clientOptions: {
+ commandOptions: {
+ timeout: 5
+ }
+ }
+ });
+
+ testUtils.testWithClient('undefined and null should not break the client', async client => {
+ await assert.rejects(
+ client.sendCommand([null as any, undefined as any]),
+ TypeError
+ );
+
+ assert.equal(
+ await client.ping(),
+ 'PONG'
+ );
+ }, GLOBAL.SERVERS.OPEN);
+ });
+
+ describe('multi', () => {
+ testUtils.testWithClient('simple', async client => {
+ assert.deepEqual(
+ await client.multi()
+ .ping()
+ .set('key', 'value')
+ .get('key')
+ .exec(),
+ ['PONG', 'OK', 'value']
+ );
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should reject the whole chain on error', client => {
+ return assert.rejects(
+ client.multi()
+ .ping()
+ .addCommand(['INVALID COMMAND'])
+ .ping()
+ .exec()
+ );
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should reject the whole chain upon client disconnect', async client => {
+ await client.close();
+
+ return assert.rejects(
+ client.multi()
+ .ping()
+ .set('key', 'value')
+ .get('key')
+ .exec(),
+ ClientClosedError
+ );
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('with script', async client => {
+ assert.deepEqual(
+ await client.multi()
+ .set('key', '2')
+ .square('key')
+ .exec(),
+ ['OK', 4]
+ );
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ scripts: {
+ square: SQUARE_SCRIPT
+ }
+ }
+ });
+
+ testUtils.testWithClient('WatchError', async client => {
+ await client.watch('key');
+
+ const duplicate = await client.duplicate().connect();
+ try {
+ await client.set(
+ 'key',
+ '1'
+ );
+ } finally {
+ duplicate.destroy();
+ }
+
+ await assert.rejects(
+ client.multi()
+ .decr('key')
+ .exec(),
+ WatchError
+ );
+ }, GLOBAL.SERVERS.OPEN);
+
+ describe('execAsPipeline', () => {
+ testUtils.testWithClient('exec(true)', async client => {
+ assert.deepEqual(
+ await client.multi()
+ .ping()
+ .exec(true),
+ ['PONG']
+ );
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('empty execAsPipeline', async client => {
+ assert.deepEqual(
+ await client.multi().execAsPipeline(),
+ []
+ );
+ }, GLOBAL.SERVERS.OPEN);
+ });
+
+ testUtils.testWithClient('should remember selected db', async client => {
+ await client.multi()
+ .select(1)
+ .exec();
+ await killClient(client);
+ assert.equal(
+ (await client.clientInfo()).db,
+ 1
+ );
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ minimumDockerVersion: [6, 2] // CLIENT INFO
+ });
+
+ testUtils.testWithClient('should handle error replies (#2665)', async client => {
+ await assert.rejects(
+ client.multi()
+ .set('key', 'value')
+ .hGetAll('key')
+ .exec(),
+ err => {
+ assert.ok(err instanceof MultiErrorReply);
+ assert.equal(err.replies.length, 2);
+ assert.deepEqual(err.errorIndexes, [1]);
+ assert.ok(err.replies[1] instanceof ErrorReply);
+ // @ts-ignore TS2802
+ assert.deepEqual([...err.errors()], [err.replies[1]]);
+ return true;
+ }
+ );
+ }, GLOBAL.SERVERS.OPEN);
+ });
+
+ testUtils.testWithClient('scripts', async client => {
+ const [, reply] = await Promise.all([
+ client.set('key', '2'),
+ client.square('key')
+ ]);
+
+ assert.equal(reply, 4);
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ scripts: {
+ square: SQUARE_SCRIPT
+ }
+ }
+ });
+
+ const module = {
+ echo: {
+ parseCommand(parser: CommandParser, message: string) {
+ parser.push('ECHO', message);
+ },
+ transformReply: undefined as unknown as () => BlobStringReply
+ }
+ };
+
+ testUtils.testWithClient('modules', async client => {
+ assert.equal(
+ await client.module.echo('message'),
+ 'message'
+ );
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ modules: {
+ module
+ }
+ }
+ });
+
+ testUtils.testWithClient('functions', async client => {
+ const [, , reply] = await Promise.all([
+ loadMathFunction(client),
+ client.set('key', '2'),
+ client.math.square('key')
+ ]);
+
+ assert.equal(reply, 4);
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ minimumDockerVersion: [7, 0],
+ clientOptions: {
+ functions: {
+ math: MATH_FUNCTION.library
+ }
+ }
+ });
+
+ testUtils.testWithClient('duplicate should reuse command options', async client => {
+ const duplicate = client.duplicate();
+
+ await duplicate.connect();
+
+ try {
+ assert.deepEqual(
+ await duplicate.ping(),
+ Buffer.from('PONG')
+ );
+ } finally {
+ duplicate.close();
+ }
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ commandOptions: {
+ typeMapping: {
+ [RESP_TYPES.SIMPLE_STRING]: Buffer
+ }
+ }
+ },
+ disableClientSetup: true,
+ });
+
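+ // Force a disconnect: QUIT makes the server close the connection, then wait for the 'error' event and for a follow-up PING to reject.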
+ async function killClient(
+ client: RedisClientType,
+ errorClient: RedisClientType = client
+ ): Promise<void> {
+ const onceErrorPromise = once(errorClient, 'error');
+ await client.sendCommand(['QUIT']);
+ await Promise.all([
+ onceErrorPromise,
+ assert.rejects(client.ping())
+ ]);
+ }
+
+ testUtils.testWithClient('should reconnect when socket disconnects', async client => {
+ await killClient(client);
+ await assert.doesNotReject(client.ping());
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should remember selected db', async client => {
+ await client.select(1);
+ await killClient(client);
+ assert.equal(
+ (await client.clientInfo()).db,
+ 1
+ );
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ minimumDockerVersion: [6, 2] // CLIENT INFO
+ });
+
+ testUtils.testWithClient('scanIterator', async client => {
+ const entries: Array<string> = [],
+ keys = new Set<string>();
+ for (let i = 0; i < 100; i++) {
+ const key = i.toString();
+ keys.add(key);
+ entries.push(key, '');
+ }
+
+ await client.mSet(entries);
+
+ const results = new Set();
+ for await (const keys of client.scanIterator()) {
+ for (const key of keys) {
+ results.add(key);
+ }
+ }
+
+ assert.deepEqual(keys, results);
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('hScanIterator', async client => {
+ const hash: Record<string, string> = {};
+ for (let i = 0; i < 100; i++) {
+ hash[i.toString()] = i.toString();
+ }
+
+ await client.hSet('key', hash);
+
+ const results: Record<string, string> = {};
+ for await (const entries of client.hScanIterator('key')) {
+ for (const { field, value } of entries) {
+ results[field] = value;
+ }
+ }
+
+ assert.deepEqual(hash, results);
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('hScanNoValuesIterator', async client => {
+ const hash: Record<string, string> = {};
+ const expectedFields: Array<string> = [];
+ for (let i = 0; i < 100; i++) {
+ hash[i.toString()] = i.toString();
+ expectedFields.push(i.toString());
+ }
+
+ await client.hSet('key', hash);
+
+ const actualFields: Array<string> = [];
+ for await (const fields of client.hScanNoValuesIterator('key')) {
+ for (const field of fields) {
+ actualFields.push(field);
+ }
+ }
+
+ function sort(a: string, b: string) {
+ return Number(a) - Number(b);
+ }
+
+ assert.deepEqual(actualFields.sort(sort), expectedFields);
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ minimumDockerVersion: [7, 4]
+ });
+
+ testUtils.testWithClient('sScanIterator', async client => {
+ const members = new Set();
+ for (let i = 0; i < 100; i++) {
+ members.add(i.toString());
+ }
+
+ await client.sAdd('key', Array.from(members));
+
+ const results = new Set();
+ for await (const members of client.sScanIterator('key')) {
+ for (const member of members) {
+ results.add(member);
+ }
+ }
+
+ assert.deepEqual(members, results);
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('zScanIterator', async client => {
+ const members: Array<SortedSetMember> = [],
+ map = new Map<string, number>();
+ for (let i = 0; i < 100; i++) {
+ const member = {
+ value: i.toString(),
+ score: 1
+ };
+ map.set(member.value, member.score);
+ members.push(member);
+ }
+
+ await client.zAdd('key', members);
+
+ const results = new Map<string, number>();
+ for await (const members of client.zScanIterator('key')) {
+ for (const { value, score } of members) {
+ results.set(value, score);
+ }
+ }
+
+ assert.deepEqual(map, results);
+ }, GLOBAL.SERVERS.OPEN);
+
+ describe('PubSub', () => {
+ testUtils.testWithClient('should be able to publish and subscribe to messages', async publisher => {
+ function assertStringListener(message: string, channel: string) {
+ assert.equal(typeof message, 'string');
+ assert.equal(typeof channel, 'string');
+ }
+
+ function assertBufferListener(message: Buffer, channel: Buffer) {
+ assert.ok(message instanceof Buffer);
+ assert.ok(channel instanceof Buffer);
+ }
+
+ const subscriber = await publisher.duplicate().connect();
+
+ try {
+ const channelListener1 = spy(assertBufferListener),
+ channelListener2 = spy(assertStringListener),
+ patternListener = spy(assertStringListener);
+
+ await Promise.all([
+ subscriber.subscribe('channel', channelListener1, true),
+ subscriber.subscribe('channel', channelListener2),
+ subscriber.pSubscribe('channel*', patternListener)
+ ]);
+ await Promise.all([
+ waitTillBeenCalled(channelListener1),
+ waitTillBeenCalled(channelListener2),
+ waitTillBeenCalled(patternListener),
+ publisher.publish(Buffer.from('channel'), Buffer.from('message'))
+ ]);
+ assert.ok(channelListener1.calledOnceWithExactly(Buffer.from('message'), Buffer.from('channel')));
+ assert.ok(channelListener2.calledOnceWithExactly('message', 'channel'));
+ assert.ok(patternListener.calledOnceWithExactly('message', 'channel'));
+
+ await subscriber.unsubscribe('channel', channelListener1, true);
+ await Promise.all([
+ waitTillBeenCalled(channelListener2),
+ waitTillBeenCalled(patternListener),
+ publisher.publish('channel', 'message')
+ ]);
+ assert.ok(channelListener1.calledOnce);
+ assert.ok(channelListener2.calledTwice);
+ assert.ok(channelListener2.secondCall.calledWithExactly('message', 'channel'));
+ assert.ok(patternListener.calledTwice);
+ assert.ok(patternListener.secondCall.calledWithExactly('message', 'channel'));
+ await subscriber.unsubscribe('channel');
+ await Promise.all([
+ waitTillBeenCalled(patternListener),
+ publisher.publish('channel', 'message')
+ ]);
+ assert.ok(channelListener1.calledOnce);
+ assert.ok(channelListener2.calledTwice);
+ assert.ok(patternListener.calledThrice);
+ assert.ok(patternListener.thirdCall.calledWithExactly('message', 'channel'));
+
+ await subscriber.pUnsubscribe();
+ await publisher.publish('channel', 'message');
+ assert.ok(channelListener1.calledOnce);
+ assert.ok(channelListener2.calledTwice);
+ assert.ok(patternListener.calledThrice);
+
+ // should be able to send commands when unsubscribed from all channels (see #1652)
+ await assert.doesNotReject(subscriber.ping());
+ } finally {
+ subscriber.destroy();
+ }
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should resubscribe', async publisher => {
+ const subscriber = await publisher.duplicate().connect();
+
+ try {
+ const channelListener = spy();
+ await subscriber.subscribe('channel', channelListener);
+
+ const patternListener = spy();
+ await subscriber.pSubscribe('channe*', patternListener);
+
+ await Promise.all([
+ once(subscriber, 'error'),
+ publisher.clientKill({
+ filter: 'SKIPME',
+ skipMe: true
+ })
+ ]);
+
+ await once(subscriber, 'ready');
+
+ await Promise.all([
+ waitTillBeenCalled(channelListener),
+ waitTillBeenCalled(patternListener),
+ publisher.publish('channel', 'message')
+ ]);
+ } finally {
+ subscriber.destroy();
+ }
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should not fail when message arrives right after subscribe', async publisher => {
+ const subscriber = await publisher.duplicate().connect();
+
+ try {
+ await assert.doesNotReject(Promise.all([
+ subscriber.subscribe('channel', () => {
+ // noop
+ }),
+ publisher.publish('channel', 'message')
+ ]));
+ } finally {
+ subscriber.destroy();
+ }
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should be able to quit in PubSub mode', async client => {
+ await client.subscribe('channel', () => {
+ // noop
+ });
+
+ await assert.doesNotReject(client.quit());
+
+ assert.equal(client.isOpen, false);
+ }, GLOBAL.SERVERS.OPEN);
+ });
+
+ testUtils.testWithClient('ConnectionTimeoutError', async client => {
+ const promise = assert.rejects(client.connect(), ConnectionTimeoutError),
+ start = process.hrtime.bigint();
+
+ while (process.hrtime.bigint() - start < 1_000_000) {
+ // block the event loop for 1ms, to make sure the connection will timeout
+ }
+
+ await promise;
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ socket: {
+ connectTimeout: 1
+ }
+ },
+ disableClientSetup: true
+ });
+
+ testUtils.testWithClient('client.quit', async client => {
+ await client.connect();
+
+ const pingPromise = client.ping(),
+ quitPromise = client.quit();
+ assert.equal(client.isOpen, false);
+
+ const [ping, quit] = await Promise.all([
+ pingPromise,
+ quitPromise,
+ assert.rejects(client.ping(), ClientClosedError)
+ ]);
+
+ assert.equal(ping, 'PONG');
+ assert.equal(quit, 'OK');
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ disableClientSetup: true
+ });
+
+ testUtils.testWithClient('client.disconnect', async client => {
+ const pingPromise = client.ping(),
+ disconnectPromise = client.disconnect();
+ assert.equal(client.isOpen, false);
+ await Promise.all([
+ assert.rejects(pingPromise, DisconnectsClientError),
+ assert.doesNotReject(disconnectPromise),
+ assert.rejects(client.ping(), ClientClosedError)
+ ]);
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should be able to connect after disconnect (see #1801)', async client => {
+ await client.disconnect();
+ await client.connect();
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should be able to use ref and unref', client => {
+ client.unref();
+ client.ref();
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('pingInterval', async client => {
+ assert.deepEqual(
+ await once(client, 'ping-interval'),
+ ['PONG']
+ );
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ pingInterval: 1
+ }
+ });
+
+ testUtils.testWithClient('should reject commands in connect phase when `disableOfflineQueue`', async client => {
+ const connectPromise = client.connect();
+ await assert.rejects(
+ client.ping(),
+ ClientOfflineError
+ );
+ await connectPromise;
+ await client.disconnect();
+ }, {
+ ...GLOBAL.SERVERS.OPEN,
+ clientOptions: {
+ disableOfflineQueue: true
+ },
+ disableClientSetup: true
+ });
+
+ describe('MONITOR', () => {
+ testUtils.testWithClient('should be able to monitor commands', async client => {
+ const duplicate = await client.duplicate().connect(),
+ listener = spy(message => assert.equal(typeof message, 'string'));
+ await duplicate.monitor(listener);
+
+ try {
+ await Promise.all([
+ waitTillBeenCalled(listener),
+ client.ping()
+ ]);
+ } finally {
+ duplicate.destroy();
+ }
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should keep monitoring after reconnection', async client => {
+ const duplicate = await client.duplicate().connect(),
+ listener = spy(message => assert.equal(typeof message, 'string'));
+ await duplicate.monitor(listener);
+
+ try {
+ await Promise.all([
+ once(duplicate, 'error'),
+ client.clientKill({
+ filter: 'SKIPME',
+ skipMe: true
+ })
+ ]);
+
+ await once(duplicate, 'ready');
+
+ await Promise.all([
+ waitTillBeenCalled(listener),
+ client.ping()
+ ]);
+ } finally {
+ duplicate.destroy();
+ }
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should be able to go back to "normal mode"', async client => {
+ await Promise.all([
+ client.monitor(() => { }),
+ client.reset()
+ ]);
+ await assert.doesNotReject(client.ping());
+ }, GLOBAL.SERVERS.OPEN);
+
+ testUtils.testWithClient('should respect type mapping', async client => {
+ const duplicate = await client.duplicate().connect(),
+ listener = spy(message => assert.ok(message instanceof Buffer));
+ await duplicate.withTypeMapping({
+ [RESP_TYPES.SIMPLE_STRING]: Buffer
+ }).monitor(listener);
+
+ try {
+ await Promise.all([
+ waitTillBeenCalled(listener),
+ client.ping()
+ ]);
+ } finally {
+ duplicate.destroy();
+ }
+ }, GLOBAL.SERVERS.OPEN);
+ });
+});
+
+/**
+ * Executes the provided function in a context where setImmediate is stubbed to do nothing.
+ * This blocks setImmediate callbacks from executing.
+ */
+async function blockSetImmediate(fn: () => Promise<unknown>) {
+ let setImmediateStub: any;
+
+ try {
+ setImmediateStub = stub(global, 'setImmediate');
+ setImmediateStub.callsFake(() => {
+ // Don't call the callback, effectively blocking execution
+ });
+ await fn();
+ } finally {
+ if (setImmediateStub) {
+ setImmediateStub.restore();
+ }
+ }
+}
diff --git a/packages/client/lib/client/index.ts b/packages/client/lib/client/index.ts
new file mode 100644
index 00000000000..ea2102c37fd
--- /dev/null
+++ b/packages/client/lib/client/index.ts
@@ -0,0 +1,1567 @@
+import COMMANDS from '../commands';
+import RedisSocket, { RedisSocketOptions } from './socket';
+import { BasicAuth, CredentialsError, CredentialsProvider, StreamingCredentialsProvider, UnableToObtainNewCredentialsError, Disposable } from '../authx';
+import RedisCommandsQueue, { CommandOptions } from './commands-queue';
+import { EventEmitter } from 'node:events';
+import { attachConfig, functionArgumentsPrefix, getTransformReply, scriptArgumentsPrefix } from '../commander';
+import { ClientClosedError, ClientOfflineError, DisconnectsClientError, WatchError } from '../errors';
+import { URL } from 'node:url';
+import { TcpSocketConnectOpts } from 'node:net';
+import { PUBSUB_TYPE, PubSubType, PubSubListener, PubSubTypeListeners, ChannelListeners } from './pub-sub';
+import { Command, CommandSignature, TypeMapping, CommanderConfig, RedisFunction, RedisFunctions, RedisModules, RedisScript, RedisScripts, ReplyUnion, RespVersions, RedisArgument, ReplyWithTypeMapping, SimpleStringReply, TransformReply, CommandArguments } from '../RESP/types';
+import RedisClientMultiCommand, { RedisClientMultiCommandType } from './multi-command';
+import { MULTI_MODE, MultiMode, RedisMultiQueuedCommand } from '../multi-command';
+import HELLO, { HelloOptions } from '../commands/HELLO';
+import { ScanOptions, ScanCommonOptions } from '../commands/SCAN';
+import { RedisLegacyClient, RedisLegacyClientType } from './legacy-mode';
+import { RedisPoolOptions, RedisClientPool } from './pool';
+import { RedisVariadicArgument, parseArgs, pushVariadicArguments } from '../commands/generic-transformers';
+import { BasicClientSideCache, ClientSideCacheConfig, ClientSideCacheProvider } from './cache';
+import { BasicCommandParser, CommandParser } from './parser';
+import SingleEntryCache from '../single-entry-cache';
+import { version } from '../../package.json'
+import EnterpriseMaintenanceManager, { MaintenanceUpdate, MovingEndpointType } from './enterprise-maintenance-manager';
+
+export interface RedisClientOptions<
+ M extends RedisModules = RedisModules,
+ F extends RedisFunctions = RedisFunctions,
+ S extends RedisScripts = RedisScripts,
+ RESP extends RespVersions = RespVersions,
+ TYPE_MAPPING extends TypeMapping = TypeMapping,
+ SocketOptions extends RedisSocketOptions = RedisSocketOptions
+> extends CommanderConfig {
+ /**
+ * `redis[s]://[[username][:password]@][host][:port][/db-number]`
+ * See [`redis`](https://www.iana.org/assignments/uri-schemes/prov/redis) and [`rediss`](https://www.iana.org/assignments/uri-schemes/prov/rediss) IANA registration for more details
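+ *
+ * @example Connecting with a URL (a minimal sketch; credentials, host, and db number are placeholders)
+ * ```
+ * const client = createClient({
+ *   url: 'redis://alice:secret@localhost:6379/0'
+ * });
+ * ```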
+ */
+ url?: string;
+ /**
+ * Socket connection properties
+ */
+ socket?: SocketOptions;
+ /**
+ * ACL username ([see ACL guide](https://redis.io/topics/acl))
+ */
+ username?: string;
+ /**
+ * ACL password or the old "--requirepass" password
+ */
+ password?: string;
+
+ /**
+ * Provides credentials for authentication. Can be set directly or will be created internally
+ * if username/password are provided instead. If both are supplied, this credentialsProvider
+ * takes precedence over username/password.
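+ *
+ * @example Supplying credentials asynchronously (a sketch; the values are placeholders)
+ * ```
+ * const client = createClient({
+ *   credentialsProvider: {
+ *     type: 'async-credentials-provider',
+ *     credentials: async () => ({ username: 'user', password: 'secret' })
+ *   }
+ * });
+ * ```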
+ */
+ credentialsProvider?: CredentialsProvider;
+ /**
+ * Client name ([see `CLIENT SETNAME`](https://redis.io/commands/client-setname))
+ */
+ name?: string;
+ /**
+ * Redis database number (see [`SELECT`](https://redis.io/commands/select) command)
+ */
+ database?: number;
+ /**
+ * Maximum length of the client's internal command queue
+ */
+ commandsQueueMaxLength?: number;
+ /**
+ * When `true`, commands are rejected when the client is reconnecting.
+ * When `false`, commands are queued for execution after reconnection.
+ */
+ disableOfflineQueue?: boolean;
+ /**
+ * Connect in [`READONLY`](https://redis.io/commands/readonly) mode
+ */
+ readonly?: boolean;
+ /**
+ * Send a `PING` command at the given interval (in ms).
+ * Useful with Redis deployments that do not honor TCP Keep-Alive.
+ */
+ pingInterval?: number;
+ /**
+ * Default command options to be applied to all commands executed through this client.
+ *
+ * These options can be overridden on a per-command basis when calling specific commands.
+ *
+ * @property {symbol} [chainId] - Identifier for chaining commands together
+ * @property {boolean} [asap] - When true, the command is executed as soon as possible
+ * @property {AbortSignal} [abortSignal] - AbortSignal to cancel the command
+ * @property {TypeMapping} [typeMapping] - Custom type mappings between RESP and JavaScript types
+ *
+ * @example Setting default command options
+ * ```
+ * const client = createClient({
+ * commandOptions: {
+ * asap: true,
+ * typeMapping: {
+ * // Custom type mapping configuration
+ * }
+ * }
+ * });
+ * ```
+ */
+ commandOptions?: CommandOptions;
+ /**
+ * Client Side Caching configuration.
+ *
+ * Enables Redis Servers and Clients to work together to cache results from commands
+ * sent to a server. The server will notify the client when cached results are no longer valid.
+ *
+ * Note: Client Side Caching is only supported with RESP3.
+ *
+ * @example Anonymous cache configuration
+ * ```
+ * const client = createClient({
+ * RESP: 3,
+ * clientSideCache: {
+ * ttl: 0,
+ * maxEntries: 0,
+ * evictPolicy: "LRU"
+ * }
+ * });
+ * ```
+ *
+ * @example Using a controllable cache
+ * ```
+ * const cache = new BasicClientSideCache({
+ * ttl: 0,
+ * maxEntries: 0,
+ * evictPolicy: "LRU"
+ * });
+ * const client = createClient({
+ * RESP: 3,
+ * clientSideCache: cache
+ * });
+ * ```
+ */
+ clientSideCache?: ClientSideCacheProvider | ClientSideCacheConfig;
+ /**
+ * If set to true, disables sending the client identifier (a user-agent-like message) to the Redis server.
+ */
+ disableClientInfo?: boolean;
+ /**
+ * Tag to append to the library name that is sent to the Redis server.
+ */
+ clientInfoTag?: string;
+ /**
+ * When set to true, client tracking is turned on and the client emits `invalidate` events when it receives invalidation messages from the Redis server.
+ * Mutually exclusive with the `clientSideCache` option.
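+ *
+ * @example Listening for invalidation messages (a sketch; assumes RESP3 and does not detail the listener payload)
+ * ```
+ * const client = createClient({ RESP: 3, emitInvalidate: true });
+ * client.on('invalidate', (message) => {
+ *   // react to the invalidated keys here
+ * });
+ * ```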
+ */
+ emitInvalidate?: boolean;
+ /**
+ * Controls how the client handles Redis Enterprise maintenance push notifications.
+ *
+ * - `disabled`: The feature is not used by the client.
+ * - `enabled`: The client attempts to enable the feature on the server. If the server responds with an error, the connection is interrupted.
+ * - `auto`: The client attempts to enable the feature on the server. If the server returns an error, the client disables the feature and continues.
+ *
+ * The default is `auto`.
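+ *
+ * @example Opting in to maintenance notifications (a sketch; the related maint* options shown here are described below)
+ * ```
+ * const client = createClient({
+ *   maintNotifications: 'enabled',
+ *   maintEndpointType: 'auto',
+ *   maintRelaxedCommandTimeout: 10000,
+ *   maintRelaxedSocketTimeout: 10000
+ * });
+ * ```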
+ */
+ maintNotifications?: 'disabled' | 'enabled' | 'auto';
+ /**
+ * Controls how the client requests the endpoint to reconnect to during a MOVING notification in Redis Enterprise maintenance.
+ *
+ * - `auto`: If the connection is opened to a name or IP address that is from/resolves to a reserved private IP range, request an internal endpoint (e.g., internal-ip), otherwise an external one. If TLS is enabled, then request a FQDN.
+ * - `internal-ip`: Enforce requesting the internal IP.
+ * - `internal-fqdn`: Enforce requesting the internal FQDN.
+ * - `external-ip`: Enforce requesting the external IP address.
+ * - `external-fqdn`: Enforce requesting the external FQDN.
+ * - `none`: Used to request a null endpoint, which tells the client to reconnect based on its current config.
+ *
+ * The default is `auto`.
+ */
+ maintEndpointType?: MovingEndpointType;
+ /**
+ * Specifies a more relaxed timeout (in milliseconds) for commands during a maintenance window.
+ * This helps minimize command timeouts during maintenance. Timeouts during the maintenance period result
+ * in a `CommandTimeoutDuringMaintenance` error.
+ *
+ * The default is 10000.
+ */
+ maintRelaxedCommandTimeout?: number;
+ /**
+ * Specifies a more relaxed timeout (in milliseconds) for the socket during a maintenance window.
+ * This helps minimize socket timeouts during maintenance. Timeouts during the maintenance period result
+ * in a `SocketTimeoutDuringMaintenance` error.
+ *
+ * The default is 10000.
+ */
+ maintRelaxedSocketTimeout?: number;
+};
+
+export type WithCommands<
+ RESP extends RespVersions,
+ TYPE_MAPPING extends TypeMapping
+> = {
+ [P in keyof typeof COMMANDS]: CommandSignature<(typeof COMMANDS)[P], RESP, TYPE_MAPPING>;
+ };
+
+export type WithModules<
+ M extends RedisModules,
+ RESP extends RespVersions,
+ TYPE_MAPPING extends TypeMapping
+> = {
+ [P in keyof M]: {
+ [C in keyof M[P]]: CommandSignature