初始化环境文件
This commit is contained in:
21
node_modules/ioredis/LICENSE
generated
vendored
Normal file
21
node_modules/ioredis/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015-2022 Zihua Li
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
1498
node_modules/ioredis/README.md
generated
vendored
Normal file
1498
node_modules/ioredis/README.md
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
165
node_modules/ioredis/built/Command.d.ts
generated
vendored
Normal file
165
node_modules/ioredis/built/Command.d.ts
generated
vendored
Normal file
@@ -0,0 +1,165 @@
|
||||
/// <reference types="node" />
|
||||
import { Callback, Respondable, CommandParameter } from "./types";
|
||||
export declare type ArgumentType = string | Buffer | number | (string | Buffer | number | any[])[];
|
||||
interface CommandOptions {
|
||||
/**
|
||||
* Set the encoding of the reply, by default buffer will be returned.
|
||||
*/
|
||||
replyEncoding?: BufferEncoding | null;
|
||||
errorStack?: Error;
|
||||
keyPrefix?: string;
|
||||
/**
|
||||
* Force the command to be readOnly so it will also execute on slaves
|
||||
*/
|
||||
readOnly?: boolean;
|
||||
}
|
||||
declare type ArgumentTransformer = (args: any[]) => any[];
|
||||
declare type ReplyTransformer = (reply: any) => any;
|
||||
export interface CommandNameFlags {
|
||||
VALID_IN_SUBSCRIBER_MODE: [
|
||||
"subscribe",
|
||||
"psubscribe",
|
||||
"unsubscribe",
|
||||
"punsubscribe",
|
||||
"ssubscribe",
|
||||
"sunsubscribe",
|
||||
"ping",
|
||||
"quit"
|
||||
];
|
||||
VALID_IN_MONITOR_MODE: ["monitor", "auth"];
|
||||
ENTER_SUBSCRIBER_MODE: ["subscribe", "psubscribe", "ssubscribe"];
|
||||
EXIT_SUBSCRIBER_MODE: ["unsubscribe", "punsubscribe", "sunsubscribe"];
|
||||
WILL_DISCONNECT: ["quit"];
|
||||
HANDSHAKE_COMMANDS: ["auth", "select", "client", "readonly", "info"];
|
||||
IGNORE_RECONNECT_ON_ERROR: ["client"];
|
||||
BLOCKING_COMMANDS: [
|
||||
"blpop",
|
||||
"brpop",
|
||||
"brpoplpush",
|
||||
"blmove",
|
||||
"bzpopmin",
|
||||
"bzpopmax",
|
||||
"bzmpop",
|
||||
"blmpop",
|
||||
"xread",
|
||||
"xreadgroup"
|
||||
];
|
||||
LAST_ARG_TIMEOUT_COMMANDS: [
|
||||
"blpop",
|
||||
"brpop",
|
||||
"brpoplpush",
|
||||
"blmove",
|
||||
"bzpopmin",
|
||||
"bzpopmax"
|
||||
];
|
||||
FIRST_ARG_TIMEOUT_COMMANDS: ["bzmpop", "blmpop"];
|
||||
BLOCK_OPTION_COMMANDS: ["xread", "xreadgroup"];
|
||||
}
|
||||
/**
|
||||
* Command instance
|
||||
*
|
||||
* It's rare that you need to create a Command instance yourself.
|
||||
*
|
||||
* ```js
|
||||
* var infoCommand = new Command('info', null, function (err, result) {
|
||||
* console.log('result', result);
|
||||
* });
|
||||
*
|
||||
* redis.sendCommand(infoCommand);
|
||||
*
|
||||
* // When no callback provided, Command instance will have a `promise` property,
|
||||
* // which will resolve/reject with the result of the command.
|
||||
* var getCommand = new Command('get', ['foo']);
|
||||
* getCommand.promise.then(function (result) {
|
||||
* console.log('result', result);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
export default class Command implements Respondable {
|
||||
name: string;
|
||||
static FLAGS: {
|
||||
[key in keyof CommandNameFlags]: CommandNameFlags[key];
|
||||
};
|
||||
private static flagMap?;
|
||||
private static _transformer;
|
||||
/**
|
||||
* Check whether the command has the flag
|
||||
*/
|
||||
static checkFlag<T extends keyof CommandNameFlags>(flagName: T, commandName: string): commandName is CommandNameFlags[T][number];
|
||||
static setArgumentTransformer(name: string, func: ArgumentTransformer): void;
|
||||
static setReplyTransformer(name: string, func: ReplyTransformer): void;
|
||||
private static getFlagMap;
|
||||
ignore?: boolean;
|
||||
isReadOnly?: boolean;
|
||||
args: CommandParameter[];
|
||||
inTransaction: boolean;
|
||||
pipelineIndex?: number;
|
||||
isResolved: boolean;
|
||||
reject: (err: Error) => void;
|
||||
resolve: (result: any) => void;
|
||||
promise: Promise<any>;
|
||||
private replyEncoding;
|
||||
private errorStack;
|
||||
private bufferMode;
|
||||
private callback;
|
||||
private transformed;
|
||||
private _commandTimeoutTimer?;
|
||||
private _blockingTimeoutTimer?;
|
||||
private _blockingDeadline?;
|
||||
private slot?;
|
||||
private keys?;
|
||||
/**
|
||||
* Creates an instance of Command.
|
||||
* @param name Command name
|
||||
* @param args An array of command arguments
|
||||
* @param options
|
||||
* @param callback The callback that handles the response.
|
||||
* If omit, the response will be handled via Promise
|
||||
*/
|
||||
constructor(name: string, args?: Array<ArgumentType>, options?: CommandOptions, callback?: Callback);
|
||||
getSlot(): number;
|
||||
getKeys(): Array<string | Buffer>;
|
||||
/**
|
||||
* Convert command to writable buffer or string
|
||||
*/
|
||||
toWritable(_socket: object): string | Buffer;
|
||||
stringifyArguments(): void;
|
||||
/**
|
||||
* Convert buffer/buffer[] to string/string[],
|
||||
* and apply reply transformer.
|
||||
*/
|
||||
transformReply(result: Buffer | Buffer[]): string | string[] | Buffer | Buffer[];
|
||||
/**
|
||||
* Set the wait time before terminating the attempt to execute a command
|
||||
* and generating an error.
|
||||
*/
|
||||
setTimeout(ms: number): void;
|
||||
/**
|
||||
* Set a timeout for blocking commands.
|
||||
* When the timeout expires, the command resolves with null (matching Redis behavior).
|
||||
* This handles the case of undetectable network failures (e.g., docker network disconnect)
|
||||
* where the TCP connection becomes a zombie and no close event fires.
|
||||
*/
|
||||
setBlockingTimeout(ms: number): void;
|
||||
/**
|
||||
* Extract the blocking timeout from the command arguments.
|
||||
*
|
||||
* @returns The timeout in seconds, null for indefinite blocking (timeout of 0),
|
||||
* or undefined if this is not a blocking command
|
||||
*/
|
||||
extractBlockingTimeout(): number | null | undefined;
|
||||
/**
|
||||
* Clear the command and blocking timers
|
||||
*/
|
||||
private _clearTimers;
|
||||
private initPromise;
|
||||
/**
|
||||
* Iterate through the command arguments that are considered keys.
|
||||
*/
|
||||
private _iterateKeys;
|
||||
/**
|
||||
* Convert the value from buffer to the target encoding.
|
||||
*/
|
||||
private _convertValue;
|
||||
}
|
||||
export {};
|
||||
449
node_modules/ioredis/built/Command.js
generated
vendored
Normal file
449
node_modules/ioredis/built/Command.js
generated
vendored
Normal file
@@ -0,0 +1,449 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const commands_1 = require("@ioredis/commands");
|
||||
const calculateSlot = require("cluster-key-slot");
|
||||
const standard_as_callback_1 = require("standard-as-callback");
|
||||
const utils_1 = require("./utils");
|
||||
const argumentParsers_1 = require("./utils/argumentParsers");
|
||||
/**
|
||||
* Command instance
|
||||
*
|
||||
* It's rare that you need to create a Command instance yourself.
|
||||
*
|
||||
* ```js
|
||||
* var infoCommand = new Command('info', null, function (err, result) {
|
||||
* console.log('result', result);
|
||||
* });
|
||||
*
|
||||
* redis.sendCommand(infoCommand);
|
||||
*
|
||||
* // When no callback provided, Command instance will have a `promise` property,
|
||||
* // which will resolve/reject with the result of the command.
|
||||
* var getCommand = new Command('get', ['foo']);
|
||||
* getCommand.promise.then(function (result) {
|
||||
* console.log('result', result);
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
class Command {
|
||||
/**
|
||||
* Creates an instance of Command.
|
||||
* @param name Command name
|
||||
* @param args An array of command arguments
|
||||
* @param options
|
||||
* @param callback The callback that handles the response.
|
||||
* If omit, the response will be handled via Promise
|
||||
*/
|
||||
constructor(name, args = [], options = {}, callback) {
|
||||
this.name = name;
|
||||
this.inTransaction = false;
|
||||
this.isResolved = false;
|
||||
this.transformed = false;
|
||||
this.replyEncoding = options.replyEncoding;
|
||||
this.errorStack = options.errorStack;
|
||||
this.args = args.flat();
|
||||
this.callback = callback;
|
||||
this.initPromise();
|
||||
if (options.keyPrefix) {
|
||||
// @ts-expect-error
|
||||
const isBufferKeyPrefix = options.keyPrefix instanceof Buffer;
|
||||
// @ts-expect-error
|
||||
let keyPrefixBuffer = isBufferKeyPrefix
|
||||
? options.keyPrefix
|
||||
: null;
|
||||
this._iterateKeys((key) => {
|
||||
if (key instanceof Buffer) {
|
||||
if (keyPrefixBuffer === null) {
|
||||
keyPrefixBuffer = Buffer.from(options.keyPrefix);
|
||||
}
|
||||
return Buffer.concat([keyPrefixBuffer, key]);
|
||||
}
|
||||
else if (isBufferKeyPrefix) {
|
||||
// @ts-expect-error
|
||||
return Buffer.concat([options.keyPrefix, Buffer.from(String(key))]);
|
||||
}
|
||||
return options.keyPrefix + key;
|
||||
});
|
||||
}
|
||||
if (options.readOnly) {
|
||||
this.isReadOnly = true;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Check whether the command has the flag
|
||||
*/
|
||||
static checkFlag(flagName, commandName) {
|
||||
commandName = commandName.toLowerCase();
|
||||
return !!this.getFlagMap()[flagName][commandName];
|
||||
}
|
||||
static setArgumentTransformer(name, func) {
|
||||
this._transformer.argument[name] = func;
|
||||
}
|
||||
static setReplyTransformer(name, func) {
|
||||
this._transformer.reply[name] = func;
|
||||
}
|
||||
static getFlagMap() {
|
||||
if (!this.flagMap) {
|
||||
this.flagMap = Object.keys(Command.FLAGS).reduce((map, flagName) => {
|
||||
map[flagName] = {};
|
||||
Command.FLAGS[flagName].forEach((commandName) => {
|
||||
map[flagName][commandName] = true;
|
||||
});
|
||||
return map;
|
||||
}, {});
|
||||
}
|
||||
return this.flagMap;
|
||||
}
|
||||
getSlot() {
|
||||
if (typeof this.slot === "undefined") {
|
||||
const key = this.getKeys()[0];
|
||||
this.slot = key == null ? null : calculateSlot(key);
|
||||
}
|
||||
return this.slot;
|
||||
}
|
||||
getKeys() {
|
||||
return this._iterateKeys();
|
||||
}
|
||||
/**
|
||||
* Convert command to writable buffer or string
|
||||
*/
|
||||
toWritable(_socket) {
|
||||
let result;
|
||||
const commandStr = "*" +
|
||||
(this.args.length + 1) +
|
||||
"\r\n$" +
|
||||
Buffer.byteLength(this.name) +
|
||||
"\r\n" +
|
||||
this.name +
|
||||
"\r\n";
|
||||
if (this.bufferMode) {
|
||||
const buffers = new MixedBuffers();
|
||||
buffers.push(commandStr);
|
||||
for (let i = 0; i < this.args.length; ++i) {
|
||||
const arg = this.args[i];
|
||||
if (arg instanceof Buffer) {
|
||||
if (arg.length === 0) {
|
||||
buffers.push("$0\r\n\r\n");
|
||||
}
|
||||
else {
|
||||
buffers.push("$" + arg.length + "\r\n");
|
||||
buffers.push(arg);
|
||||
buffers.push("\r\n");
|
||||
}
|
||||
}
|
||||
else {
|
||||
buffers.push("$" +
|
||||
Buffer.byteLength(arg) +
|
||||
"\r\n" +
|
||||
arg +
|
||||
"\r\n");
|
||||
}
|
||||
}
|
||||
result = buffers.toBuffer();
|
||||
}
|
||||
else {
|
||||
result = commandStr;
|
||||
for (let i = 0; i < this.args.length; ++i) {
|
||||
const arg = this.args[i];
|
||||
result +=
|
||||
"$" +
|
||||
Buffer.byteLength(arg) +
|
||||
"\r\n" +
|
||||
arg +
|
||||
"\r\n";
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
stringifyArguments() {
|
||||
for (let i = 0; i < this.args.length; ++i) {
|
||||
const arg = this.args[i];
|
||||
if (typeof arg === "string") {
|
||||
// buffers and strings don't need any transformation
|
||||
}
|
||||
else if (arg instanceof Buffer) {
|
||||
this.bufferMode = true;
|
||||
}
|
||||
else {
|
||||
this.args[i] = (0, utils_1.toArg)(arg);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Convert buffer/buffer[] to string/string[],
|
||||
* and apply reply transformer.
|
||||
*/
|
||||
transformReply(result) {
|
||||
if (this.replyEncoding) {
|
||||
result = (0, utils_1.convertBufferToString)(result, this.replyEncoding);
|
||||
}
|
||||
const transformer = Command._transformer.reply[this.name];
|
||||
if (transformer) {
|
||||
result = transformer(result);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
/**
|
||||
* Set the wait time before terminating the attempt to execute a command
|
||||
* and generating an error.
|
||||
*/
|
||||
setTimeout(ms) {
|
||||
if (!this._commandTimeoutTimer) {
|
||||
this._commandTimeoutTimer = setTimeout(() => {
|
||||
if (!this.isResolved) {
|
||||
this.reject(new Error("Command timed out"));
|
||||
}
|
||||
}, ms);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Set a timeout for blocking commands.
|
||||
* When the timeout expires, the command resolves with null (matching Redis behavior).
|
||||
* This handles the case of undetectable network failures (e.g., docker network disconnect)
|
||||
* where the TCP connection becomes a zombie and no close event fires.
|
||||
*/
|
||||
setBlockingTimeout(ms) {
|
||||
if (ms <= 0) {
|
||||
return;
|
||||
}
|
||||
// Clear existing timer if any (can happen when command moves from offline to command queue)
|
||||
if (this._blockingTimeoutTimer) {
|
||||
clearTimeout(this._blockingTimeoutTimer);
|
||||
this._blockingTimeoutTimer = undefined;
|
||||
}
|
||||
const now = Date.now();
|
||||
// First call: establish absolute deadline
|
||||
if (this._blockingDeadline === undefined) {
|
||||
this._blockingDeadline = now + ms;
|
||||
}
|
||||
// Check if we've already exceeded the deadline
|
||||
const remaining = this._blockingDeadline - now;
|
||||
if (remaining <= 0) {
|
||||
// Resolve with null to indicate timeout (same as Redis behavior)
|
||||
this.resolve(null);
|
||||
return;
|
||||
}
|
||||
this._blockingTimeoutTimer = setTimeout(() => {
|
||||
if (this.isResolved) {
|
||||
this._blockingTimeoutTimer = undefined;
|
||||
return;
|
||||
}
|
||||
this._blockingTimeoutTimer = undefined;
|
||||
// Timeout expired - resolve with null (same as Redis behavior when blocking command times out)
|
||||
this.resolve(null);
|
||||
}, remaining);
|
||||
}
|
||||
/**
|
||||
* Extract the blocking timeout from the command arguments.
|
||||
*
|
||||
* @returns The timeout in seconds, null for indefinite blocking (timeout of 0),
|
||||
* or undefined if this is not a blocking command
|
||||
*/
|
||||
extractBlockingTimeout() {
|
||||
const args = this.args;
|
||||
if (!args || args.length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
const name = this.name.toLowerCase();
|
||||
if (Command.checkFlag("LAST_ARG_TIMEOUT_COMMANDS", name)) {
|
||||
return (0, argumentParsers_1.parseSecondsArgument)(args[args.length - 1]);
|
||||
}
|
||||
if (Command.checkFlag("FIRST_ARG_TIMEOUT_COMMANDS", name)) {
|
||||
return (0, argumentParsers_1.parseSecondsArgument)(args[0]);
|
||||
}
|
||||
if (Command.checkFlag("BLOCK_OPTION_COMMANDS", name)) {
|
||||
return (0, argumentParsers_1.parseBlockOption)(args);
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
/**
|
||||
* Clear the command and blocking timers
|
||||
*/
|
||||
_clearTimers() {
|
||||
const existingTimer = this._commandTimeoutTimer;
|
||||
if (existingTimer) {
|
||||
clearTimeout(existingTimer);
|
||||
delete this._commandTimeoutTimer;
|
||||
}
|
||||
const blockingTimer = this._blockingTimeoutTimer;
|
||||
if (blockingTimer) {
|
||||
clearTimeout(blockingTimer);
|
||||
delete this._blockingTimeoutTimer;
|
||||
}
|
||||
}
|
||||
initPromise() {
|
||||
const promise = new Promise((resolve, reject) => {
|
||||
if (!this.transformed) {
|
||||
this.transformed = true;
|
||||
const transformer = Command._transformer.argument[this.name];
|
||||
if (transformer) {
|
||||
this.args = transformer(this.args);
|
||||
}
|
||||
this.stringifyArguments();
|
||||
}
|
||||
this.resolve = this._convertValue(resolve);
|
||||
this.reject = (err) => {
|
||||
this._clearTimers();
|
||||
if (this.errorStack) {
|
||||
reject((0, utils_1.optimizeErrorStack)(err, this.errorStack.stack, __dirname));
|
||||
}
|
||||
else {
|
||||
reject(err);
|
||||
}
|
||||
};
|
||||
});
|
||||
this.promise = (0, standard_as_callback_1.default)(promise, this.callback);
|
||||
}
|
||||
/**
|
||||
* Iterate through the command arguments that are considered keys.
|
||||
*/
|
||||
_iterateKeys(transform = (key) => key) {
|
||||
if (typeof this.keys === "undefined") {
|
||||
this.keys = [];
|
||||
if ((0, commands_1.exists)(this.name, { caseInsensitive: true })) {
|
||||
// @ts-expect-error
|
||||
const keyIndexes = (0, commands_1.getKeyIndexes)(this.name, this.args, {
|
||||
nameCaseInsensitive: true,
|
||||
});
|
||||
for (const index of keyIndexes) {
|
||||
this.args[index] = transform(this.args[index]);
|
||||
this.keys.push(this.args[index]);
|
||||
}
|
||||
}
|
||||
}
|
||||
return this.keys;
|
||||
}
|
||||
/**
|
||||
* Convert the value from buffer to the target encoding.
|
||||
*/
|
||||
_convertValue(resolve) {
|
||||
return (value) => {
|
||||
try {
|
||||
this._clearTimers();
|
||||
resolve(this.transformReply(value));
|
||||
this.isResolved = true;
|
||||
}
|
||||
catch (err) {
|
||||
this.reject(err);
|
||||
}
|
||||
return this.promise;
|
||||
};
|
||||
}
|
||||
}
|
||||
exports.default = Command;
|
||||
Command.FLAGS = {
|
||||
VALID_IN_SUBSCRIBER_MODE: [
|
||||
"subscribe",
|
||||
"psubscribe",
|
||||
"unsubscribe",
|
||||
"punsubscribe",
|
||||
"ssubscribe",
|
||||
"sunsubscribe",
|
||||
"ping",
|
||||
"quit",
|
||||
],
|
||||
VALID_IN_MONITOR_MODE: ["monitor", "auth"],
|
||||
ENTER_SUBSCRIBER_MODE: ["subscribe", "psubscribe", "ssubscribe"],
|
||||
EXIT_SUBSCRIBER_MODE: ["unsubscribe", "punsubscribe", "sunsubscribe"],
|
||||
WILL_DISCONNECT: ["quit"],
|
||||
HANDSHAKE_COMMANDS: ["auth", "select", "client", "readonly", "info"],
|
||||
IGNORE_RECONNECT_ON_ERROR: ["client"],
|
||||
BLOCKING_COMMANDS: [
|
||||
"blpop",
|
||||
"brpop",
|
||||
"brpoplpush",
|
||||
"blmove",
|
||||
"bzpopmin",
|
||||
"bzpopmax",
|
||||
"bzmpop",
|
||||
"blmpop",
|
||||
"xread",
|
||||
"xreadgroup",
|
||||
],
|
||||
LAST_ARG_TIMEOUT_COMMANDS: [
|
||||
"blpop",
|
||||
"brpop",
|
||||
"brpoplpush",
|
||||
"blmove",
|
||||
"bzpopmin",
|
||||
"bzpopmax",
|
||||
],
|
||||
FIRST_ARG_TIMEOUT_COMMANDS: ["bzmpop", "blmpop"],
|
||||
BLOCK_OPTION_COMMANDS: ["xread", "xreadgroup"],
|
||||
};
|
||||
Command._transformer = {
|
||||
argument: {},
|
||||
reply: {},
|
||||
};
|
||||
const msetArgumentTransformer = function (args) {
|
||||
if (args.length === 1) {
|
||||
if (args[0] instanceof Map) {
|
||||
return (0, utils_1.convertMapToArray)(args[0]);
|
||||
}
|
||||
if (typeof args[0] === "object" && args[0] !== null) {
|
||||
return (0, utils_1.convertObjectToArray)(args[0]);
|
||||
}
|
||||
}
|
||||
return args;
|
||||
};
|
||||
const hsetArgumentTransformer = function (args) {
|
||||
if (args.length === 2) {
|
||||
if (args[1] instanceof Map) {
|
||||
return [args[0]].concat((0, utils_1.convertMapToArray)(args[1]));
|
||||
}
|
||||
if (typeof args[1] === "object" && args[1] !== null) {
|
||||
return [args[0]].concat((0, utils_1.convertObjectToArray)(args[1]));
|
||||
}
|
||||
}
|
||||
return args;
|
||||
};
|
||||
Command.setArgumentTransformer("mset", msetArgumentTransformer);
|
||||
Command.setArgumentTransformer("msetnx", msetArgumentTransformer);
|
||||
Command.setArgumentTransformer("hset", hsetArgumentTransformer);
|
||||
Command.setArgumentTransformer("hmset", hsetArgumentTransformer);
|
||||
Command.setReplyTransformer("hgetall", function (result) {
|
||||
if (Array.isArray(result)) {
|
||||
const obj = {};
|
||||
for (let i = 0; i < result.length; i += 2) {
|
||||
const key = result[i];
|
||||
const value = result[i + 1];
|
||||
if (key in obj) {
|
||||
// can only be truthy if the property is special somehow, like '__proto__' or 'constructor'
|
||||
// https://github.com/luin/ioredis/issues/1267
|
||||
Object.defineProperty(obj, key, {
|
||||
value,
|
||||
configurable: true,
|
||||
enumerable: true,
|
||||
writable: true,
|
||||
});
|
||||
}
|
||||
else {
|
||||
obj[key] = value;
|
||||
}
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
return result;
|
||||
});
|
||||
class MixedBuffers {
|
||||
constructor() {
|
||||
this.length = 0;
|
||||
this.items = [];
|
||||
}
|
||||
push(x) {
|
||||
this.length += Buffer.byteLength(x);
|
||||
this.items.push(x);
|
||||
}
|
||||
toBuffer() {
|
||||
const result = Buffer.allocUnsafe(this.length);
|
||||
let offset = 0;
|
||||
for (const item of this.items) {
|
||||
const length = Buffer.byteLength(item);
|
||||
Buffer.isBuffer(item)
|
||||
? item.copy(result, offset)
|
||||
: result.write(item, offset, length);
|
||||
offset += length;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
37
node_modules/ioredis/built/DataHandler.d.ts
generated
vendored
Normal file
37
node_modules/ioredis/built/DataHandler.d.ts
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
/// <reference types="node" />
|
||||
import { NetStream, CommandItem } from "./types";
|
||||
import Deque = require("denque");
|
||||
import { EventEmitter } from "events";
|
||||
import SubscriptionSet from "./SubscriptionSet";
|
||||
export interface Condition {
|
||||
select: number;
|
||||
auth?: string | [string, string];
|
||||
subscriber: false | SubscriptionSet;
|
||||
}
|
||||
export declare type FlushQueueOptions = {
|
||||
offlineQueue?: boolean;
|
||||
commandQueue?: boolean;
|
||||
};
|
||||
export interface DataHandledable extends EventEmitter {
|
||||
stream: NetStream;
|
||||
status: string;
|
||||
condition: Condition | null;
|
||||
commandQueue: Deque<CommandItem>;
|
||||
disconnect(reconnect: boolean): void;
|
||||
recoverFromFatalError(commandError: Error, err: Error, options: FlushQueueOptions): void;
|
||||
handleReconnection(err: Error, item: CommandItem): void;
|
||||
}
|
||||
interface ParserOptions {
|
||||
stringNumbers: boolean;
|
||||
}
|
||||
export default class DataHandler {
|
||||
private redis;
|
||||
constructor(redis: DataHandledable, parserOptions: ParserOptions);
|
||||
private returnFatalError;
|
||||
private returnError;
|
||||
private returnReply;
|
||||
private handleSubscriberReply;
|
||||
private handleMonitorReply;
|
||||
private shiftCommand;
|
||||
}
|
||||
export {};
|
||||
224
node_modules/ioredis/built/DataHandler.js
generated
vendored
Normal file
224
node_modules/ioredis/built/DataHandler.js
generated
vendored
Normal file
@@ -0,0 +1,224 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const Command_1 = require("./Command");
|
||||
const utils_1 = require("./utils");
|
||||
const RedisParser = require("redis-parser");
|
||||
const SubscriptionSet_1 = require("./SubscriptionSet");
|
||||
const debug = (0, utils_1.Debug)("dataHandler");
|
||||
class DataHandler {
|
||||
constructor(redis, parserOptions) {
|
||||
this.redis = redis;
|
||||
const parser = new RedisParser({
|
||||
stringNumbers: parserOptions.stringNumbers,
|
||||
returnBuffers: true,
|
||||
returnError: (err) => {
|
||||
this.returnError(err);
|
||||
},
|
||||
returnFatalError: (err) => {
|
||||
this.returnFatalError(err);
|
||||
},
|
||||
returnReply: (reply) => {
|
||||
this.returnReply(reply);
|
||||
},
|
||||
});
|
||||
// prependListener ensures the parser receives and processes data before socket timeout checks are performed
|
||||
redis.stream.prependListener("data", (data) => {
|
||||
parser.execute(data);
|
||||
});
|
||||
// prependListener() doesn't enable flowing mode automatically - we need to resume the stream manually
|
||||
redis.stream.resume();
|
||||
}
|
||||
returnFatalError(err) {
|
||||
err.message += ". Please report this.";
|
||||
this.redis.recoverFromFatalError(err, err, { offlineQueue: false });
|
||||
}
|
||||
returnError(err) {
|
||||
const item = this.shiftCommand(err);
|
||||
if (!item) {
|
||||
return;
|
||||
}
|
||||
err.command = {
|
||||
name: item.command.name,
|
||||
args: item.command.args,
|
||||
};
|
||||
if (item.command.name == "ssubscribe" && err.message.includes("MOVED")) {
|
||||
this.redis.emit("moved");
|
||||
return;
|
||||
}
|
||||
this.redis.handleReconnection(err, item);
|
||||
}
|
||||
returnReply(reply) {
|
||||
if (this.handleMonitorReply(reply)) {
|
||||
return;
|
||||
}
|
||||
if (this.handleSubscriberReply(reply)) {
|
||||
return;
|
||||
}
|
||||
const item = this.shiftCommand(reply);
|
||||
if (!item) {
|
||||
return;
|
||||
}
|
||||
if (Command_1.default.checkFlag("ENTER_SUBSCRIBER_MODE", item.command.name)) {
|
||||
this.redis.condition.subscriber = new SubscriptionSet_1.default();
|
||||
this.redis.condition.subscriber.add(item.command.name, reply[1].toString());
|
||||
if (!fillSubCommand(item.command, reply[2])) {
|
||||
this.redis.commandQueue.unshift(item);
|
||||
}
|
||||
}
|
||||
else if (Command_1.default.checkFlag("EXIT_SUBSCRIBER_MODE", item.command.name)) {
|
||||
if (!fillUnsubCommand(item.command, reply[2])) {
|
||||
this.redis.commandQueue.unshift(item);
|
||||
}
|
||||
}
|
||||
else {
|
||||
item.command.resolve(reply);
|
||||
}
|
||||
}
|
||||
handleSubscriberReply(reply) {
|
||||
if (!this.redis.condition.subscriber) {
|
||||
return false;
|
||||
}
|
||||
const replyType = Array.isArray(reply) ? reply[0].toString() : null;
|
||||
debug('receive reply "%s" in subscriber mode', replyType);
|
||||
switch (replyType) {
|
||||
case "message":
|
||||
if (this.redis.listeners("message").length > 0) {
|
||||
// Check if there're listeners to avoid unnecessary `toString()`.
|
||||
this.redis.emit("message", reply[1].toString(), reply[2] ? reply[2].toString() : "");
|
||||
}
|
||||
this.redis.emit("messageBuffer", reply[1], reply[2]);
|
||||
break;
|
||||
case "pmessage": {
|
||||
const pattern = reply[1].toString();
|
||||
if (this.redis.listeners("pmessage").length > 0) {
|
||||
this.redis.emit("pmessage", pattern, reply[2].toString(), reply[3].toString());
|
||||
}
|
||||
this.redis.emit("pmessageBuffer", pattern, reply[2], reply[3]);
|
||||
break;
|
||||
}
|
||||
case "smessage": {
|
||||
if (this.redis.listeners("smessage").length > 0) {
|
||||
this.redis.emit("smessage", reply[1].toString(), reply[2] ? reply[2].toString() : "");
|
||||
}
|
||||
this.redis.emit("smessageBuffer", reply[1], reply[2]);
|
||||
break;
|
||||
}
|
||||
case "ssubscribe":
|
||||
case "subscribe":
|
||||
case "psubscribe": {
|
||||
const channel = reply[1].toString();
|
||||
this.redis.condition.subscriber.add(replyType, channel);
|
||||
const item = this.shiftCommand(reply);
|
||||
if (!item) {
|
||||
return;
|
||||
}
|
||||
if (!fillSubCommand(item.command, reply[2])) {
|
||||
this.redis.commandQueue.unshift(item);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "sunsubscribe":
|
||||
case "unsubscribe":
|
||||
case "punsubscribe": {
|
||||
const channel = reply[1] ? reply[1].toString() : null;
|
||||
if (channel) {
|
||||
this.redis.condition.subscriber.del(replyType, channel);
|
||||
}
|
||||
const count = reply[2];
|
||||
if (Number(count) === 0) {
|
||||
this.redis.condition.subscriber = false;
|
||||
}
|
||||
const item = this.shiftCommand(reply);
|
||||
if (!item) {
|
||||
return;
|
||||
}
|
||||
if (!fillUnsubCommand(item.command, count)) {
|
||||
this.redis.commandQueue.unshift(item);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
const item = this.shiftCommand(reply);
|
||||
if (!item) {
|
||||
return;
|
||||
}
|
||||
item.command.resolve(reply);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
handleMonitorReply(reply) {
|
||||
if (this.redis.status !== "monitoring") {
|
||||
return false;
|
||||
}
|
||||
const replyStr = reply.toString();
|
||||
if (replyStr === "OK") {
|
||||
// Valid commands in the monitoring mode are AUTH and MONITOR,
|
||||
// both of which always reply with 'OK'.
|
||||
// So if we got an 'OK', we can make certain that
|
||||
// the reply is made to AUTH & MONITOR.
|
||||
return false;
|
||||
}
|
||||
// Since commands sent in the monitoring mode will trigger an exception,
|
||||
// any replies we received in the monitoring mode should consider to be
|
||||
// realtime monitor data instead of result of commands.
|
||||
const len = replyStr.indexOf(" ");
|
||||
const timestamp = replyStr.slice(0, len);
|
||||
const argIndex = replyStr.indexOf('"');
|
||||
const args = replyStr
|
||||
.slice(argIndex + 1, -1)
|
||||
.split('" "')
|
||||
.map((elem) => elem.replace(/\\"/g, '"'));
|
||||
const dbAndSource = replyStr.slice(len + 2, argIndex - 2).split(" ");
|
||||
this.redis.emit("monitor", timestamp, args, dbAndSource[1], dbAndSource[0]);
|
||||
return true;
|
||||
}
|
||||
shiftCommand(reply) {
|
||||
const item = this.redis.commandQueue.shift();
|
||||
if (!item) {
|
||||
const message = "Command queue state error. If you can reproduce this, please report it.";
|
||||
const error = new Error(message +
|
||||
(reply instanceof Error
|
||||
? ` Last error: ${reply.message}`
|
||||
: ` Last reply: ${reply.toString()}`));
|
||||
this.redis.emit("error", error);
|
||||
return null;
|
||||
}
|
||||
return item;
|
||||
}
|
||||
}
|
||||
exports.default = DataHandler;
|
||||
const remainingRepliesMap = new WeakMap();
|
||||
function fillSubCommand(command, count) {
|
||||
let remainingReplies = remainingRepliesMap.has(command)
|
||||
? remainingRepliesMap.get(command)
|
||||
: command.args.length;
|
||||
remainingReplies -= 1;
|
||||
if (remainingReplies <= 0) {
|
||||
command.resolve(count);
|
||||
remainingRepliesMap.delete(command);
|
||||
return true;
|
||||
}
|
||||
remainingRepliesMap.set(command, remainingReplies);
|
||||
return false;
|
||||
}
|
||||
function fillUnsubCommand(command, count) {
|
||||
let remainingReplies = remainingRepliesMap.has(command)
|
||||
? remainingRepliesMap.get(command)
|
||||
: command.args.length;
|
||||
if (remainingReplies === 0) {
|
||||
if (Number(count) === 0) {
|
||||
remainingRepliesMap.delete(command);
|
||||
command.resolve(count);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
remainingReplies -= 1;
|
||||
if (remainingReplies <= 0) {
|
||||
command.resolve(count);
|
||||
return true;
|
||||
}
|
||||
remainingRepliesMap.set(command, remainingReplies);
|
||||
return false;
|
||||
}
|
||||
31
node_modules/ioredis/built/Pipeline.d.ts
generated
vendored
Normal file
31
node_modules/ioredis/built/Pipeline.d.ts
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
import Redis from "./Redis";
|
||||
import Cluster from "./cluster";
|
||||
import Command from "./Command";
|
||||
import Commander from "./utils/Commander";
|
||||
declare class Pipeline extends Commander<{
|
||||
type: "pipeline";
|
||||
}> {
|
||||
redis: Redis | Cluster;
|
||||
isCluster: boolean;
|
||||
isPipeline: boolean;
|
||||
leftRedirections: {
|
||||
value?: number;
|
||||
};
|
||||
promise: Promise<unknown>;
|
||||
resolve: (result: unknown) => void;
|
||||
reject: (error: Error) => void;
|
||||
private replyPending;
|
||||
private _queue;
|
||||
private _result;
|
||||
private _transactions;
|
||||
private _shaToScript;
|
||||
private preferKey;
|
||||
constructor(redis: Redis | Cluster);
|
||||
fillResult(value: unknown[], position: number): void;
|
||||
sendCommand(command: Command): unknown;
|
||||
addBatch(commands: any): this;
|
||||
}
|
||||
export default Pipeline;
|
||||
interface Pipeline {
|
||||
length: number;
|
||||
}
|
||||
342
node_modules/ioredis/built/Pipeline.js
generated
vendored
Normal file
342
node_modules/ioredis/built/Pipeline.js
generated
vendored
Normal file
@@ -0,0 +1,342 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const calculateSlot = require("cluster-key-slot");
|
||||
const commands_1 = require("@ioredis/commands");
|
||||
const standard_as_callback_1 = require("standard-as-callback");
|
||||
const util_1 = require("util");
|
||||
const Command_1 = require("./Command");
|
||||
const utils_1 = require("./utils");
|
||||
const Commander_1 = require("./utils/Commander");
|
||||
/*
|
||||
This function derives from the cluster-key-slot implementation.
|
||||
Instead of checking that all keys have the same slot, it checks that all slots are served by the same set of nodes.
|
||||
If this is satisfied, it returns the first key's slot.
|
||||
*/
|
||||
function generateMultiWithNodes(redis, keys) {
|
||||
const slot = calculateSlot(keys[0]);
|
||||
const target = redis._groupsBySlot[slot];
|
||||
for (let i = 1; i < keys.length; i++) {
|
||||
if (redis._groupsBySlot[calculateSlot(keys[i])] !== target) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return slot;
|
||||
}
|
||||
class Pipeline extends Commander_1.default {
|
||||
constructor(redis) {
|
||||
super();
|
||||
this.redis = redis;
|
||||
this.isPipeline = true;
|
||||
this.replyPending = 0;
|
||||
this._queue = [];
|
||||
this._result = [];
|
||||
this._transactions = 0;
|
||||
this._shaToScript = {};
|
||||
this.isCluster =
|
||||
this.redis.constructor.name === "Cluster" || this.redis.isCluster;
|
||||
this.options = redis.options;
|
||||
Object.keys(redis.scriptsSet).forEach((name) => {
|
||||
const script = redis.scriptsSet[name];
|
||||
this._shaToScript[script.sha] = script;
|
||||
this[name] = redis[name];
|
||||
this[name + "Buffer"] = redis[name + "Buffer"];
|
||||
});
|
||||
redis.addedBuiltinSet.forEach((name) => {
|
||||
this[name] = redis[name];
|
||||
this[name + "Buffer"] = redis[name + "Buffer"];
|
||||
});
|
||||
this.promise = new Promise((resolve, reject) => {
|
||||
this.resolve = resolve;
|
||||
this.reject = reject;
|
||||
});
|
||||
const _this = this;
|
||||
Object.defineProperty(this, "length", {
|
||||
get: function () {
|
||||
return _this._queue.length;
|
||||
},
|
||||
});
|
||||
}
|
||||
fillResult(value, position) {
|
||||
if (this._queue[position].name === "exec" && Array.isArray(value[1])) {
|
||||
const execLength = value[1].length;
|
||||
for (let i = 0; i < execLength; i++) {
|
||||
if (value[1][i] instanceof Error) {
|
||||
continue;
|
||||
}
|
||||
const cmd = this._queue[position - (execLength - i)];
|
||||
try {
|
||||
value[1][i] = cmd.transformReply(value[1][i]);
|
||||
}
|
||||
catch (err) {
|
||||
value[1][i] = err;
|
||||
}
|
||||
}
|
||||
}
|
||||
this._result[position] = value;
|
||||
if (--this.replyPending) {
|
||||
return;
|
||||
}
|
||||
if (this.isCluster) {
|
||||
let retriable = true;
|
||||
let commonError;
|
||||
for (let i = 0; i < this._result.length; ++i) {
|
||||
const error = this._result[i][0];
|
||||
const command = this._queue[i];
|
||||
if (error) {
|
||||
if (command.name === "exec" &&
|
||||
error.message ===
|
||||
"EXECABORT Transaction discarded because of previous errors.") {
|
||||
continue;
|
||||
}
|
||||
if (!commonError) {
|
||||
commonError = {
|
||||
name: error.name,
|
||||
message: error.message,
|
||||
};
|
||||
}
|
||||
else if (commonError.name !== error.name ||
|
||||
commonError.message !== error.message) {
|
||||
retriable = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
else if (!command.inTransaction) {
|
||||
const isReadOnly = (0, commands_1.exists)(command.name, { caseInsensitive: true }) &&
|
||||
(0, commands_1.hasFlag)(command.name, "readonly", { nameCaseInsensitive: true });
|
||||
if (!isReadOnly) {
|
||||
retriable = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (commonError && retriable) {
|
||||
const _this = this;
|
||||
const errv = commonError.message.split(" ");
|
||||
const queue = this._queue;
|
||||
let inTransaction = false;
|
||||
this._queue = [];
|
||||
for (let i = 0; i < queue.length; ++i) {
|
||||
if (errv[0] === "ASK" &&
|
||||
!inTransaction &&
|
||||
queue[i].name !== "asking" &&
|
||||
(!queue[i - 1] || queue[i - 1].name !== "asking")) {
|
||||
const asking = new Command_1.default("asking");
|
||||
asking.ignore = true;
|
||||
this.sendCommand(asking);
|
||||
}
|
||||
queue[i].initPromise();
|
||||
this.sendCommand(queue[i]);
|
||||
inTransaction = queue[i].inTransaction;
|
||||
}
|
||||
let matched = true;
|
||||
if (typeof this.leftRedirections === "undefined") {
|
||||
this.leftRedirections = {};
|
||||
}
|
||||
const exec = function () {
|
||||
_this.exec();
|
||||
};
|
||||
const cluster = this.redis;
|
||||
cluster.handleError(commonError, this.leftRedirections, {
|
||||
moved: function (_slot, key) {
|
||||
_this.preferKey = key;
|
||||
if (cluster.slots[errv[1]]) {
|
||||
if (cluster.slots[errv[1]][0] !== key) {
|
||||
cluster.slots[errv[1]] = [key];
|
||||
}
|
||||
}
|
||||
else {
|
||||
cluster.slots[errv[1]] = [key];
|
||||
}
|
||||
cluster._groupsBySlot[errv[1]] =
|
||||
cluster._groupsIds[cluster.slots[errv[1]].join(";")];
|
||||
cluster.refreshSlotsCache();
|
||||
_this.exec();
|
||||
},
|
||||
ask: function (_slot, key) {
|
||||
_this.preferKey = key;
|
||||
_this.exec();
|
||||
},
|
||||
tryagain: exec,
|
||||
clusterDown: exec,
|
||||
connectionClosed: exec,
|
||||
maxRedirections: () => {
|
||||
matched = false;
|
||||
},
|
||||
defaults: () => {
|
||||
matched = false;
|
||||
},
|
||||
});
|
||||
if (matched) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
let ignoredCount = 0;
|
||||
for (let i = 0; i < this._queue.length - ignoredCount; ++i) {
|
||||
if (this._queue[i + ignoredCount].ignore) {
|
||||
ignoredCount += 1;
|
||||
}
|
||||
this._result[i] = this._result[i + ignoredCount];
|
||||
}
|
||||
this.resolve(this._result.slice(0, this._result.length - ignoredCount));
|
||||
}
|
||||
sendCommand(command) {
|
||||
if (this._transactions > 0) {
|
||||
command.inTransaction = true;
|
||||
}
|
||||
const position = this._queue.length;
|
||||
command.pipelineIndex = position;
|
||||
command.promise
|
||||
.then((result) => {
|
||||
this.fillResult([null, result], position);
|
||||
})
|
||||
.catch((error) => {
|
||||
this.fillResult([error], position);
|
||||
});
|
||||
this._queue.push(command);
|
||||
return this;
|
||||
}
|
||||
addBatch(commands) {
|
||||
let command, commandName, args;
|
||||
for (let i = 0; i < commands.length; ++i) {
|
||||
command = commands[i];
|
||||
commandName = command[0];
|
||||
args = command.slice(1);
|
||||
this[commandName].apply(this, args);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
}
|
||||
exports.default = Pipeline;
|
||||
// @ts-expect-error
|
||||
const multi = Pipeline.prototype.multi;
|
||||
// @ts-expect-error
|
||||
Pipeline.prototype.multi = function () {
|
||||
this._transactions += 1;
|
||||
return multi.apply(this, arguments);
|
||||
};
|
||||
// @ts-expect-error
|
||||
const execBuffer = Pipeline.prototype.execBuffer;
|
||||
// @ts-expect-error
|
||||
Pipeline.prototype.execBuffer = (0, util_1.deprecate)(function () {
|
||||
if (this._transactions > 0) {
|
||||
this._transactions -= 1;
|
||||
}
|
||||
return execBuffer.apply(this, arguments);
|
||||
}, "Pipeline#execBuffer: Use Pipeline#exec instead");
|
||||
// NOTE: To avoid an unhandled promise rejection, this will unconditionally always return this.promise,
|
||||
// which always has the rejection handled by standard-as-callback
|
||||
// adding the provided rejection callback.
|
||||
//
|
||||
// If a different promise instance were returned, that promise would cause its own unhandled promise rejection
|
||||
// errors, even if that promise unconditionally resolved to **the resolved value of** this.promise.
|
||||
Pipeline.prototype.exec = function (callback) {
|
||||
// Wait for the cluster to be connected, since we need nodes information before continuing
|
||||
if (this.isCluster && !this.redis.slots.length) {
|
||||
if (this.redis.status === "wait")
|
||||
this.redis.connect().catch(utils_1.noop);
|
||||
if (callback && !this.nodeifiedPromise) {
|
||||
this.nodeifiedPromise = true;
|
||||
(0, standard_as_callback_1.default)(this.promise, callback);
|
||||
}
|
||||
this.redis.delayUntilReady((err) => {
|
||||
if (err) {
|
||||
this.reject(err);
|
||||
return;
|
||||
}
|
||||
this.exec(callback);
|
||||
});
|
||||
return this.promise;
|
||||
}
|
||||
if (this._transactions > 0) {
|
||||
this._transactions -= 1;
|
||||
return execBuffer.apply(this, arguments);
|
||||
}
|
||||
if (!this.nodeifiedPromise) {
|
||||
this.nodeifiedPromise = true;
|
||||
(0, standard_as_callback_1.default)(this.promise, callback);
|
||||
}
|
||||
if (!this._queue.length) {
|
||||
this.resolve([]);
|
||||
}
|
||||
let pipelineSlot;
|
||||
if (this.isCluster) {
|
||||
// List of the first key for each command
|
||||
const sampleKeys = [];
|
||||
for (let i = 0; i < this._queue.length; i++) {
|
||||
const keys = this._queue[i].getKeys();
|
||||
if (keys.length) {
|
||||
sampleKeys.push(keys[0]);
|
||||
}
|
||||
// For each command, check that the keys belong to the same slot
|
||||
if (keys.length && calculateSlot.generateMulti(keys) < 0) {
|
||||
this.reject(new Error("All the keys in a pipeline command should belong to the same slot"));
|
||||
return this.promise;
|
||||
}
|
||||
}
|
||||
if (sampleKeys.length) {
|
||||
pipelineSlot = generateMultiWithNodes(this.redis, sampleKeys);
|
||||
if (pipelineSlot < 0) {
|
||||
this.reject(new Error("All keys in the pipeline should belong to the same slots allocation group"));
|
||||
return this.promise;
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Send the pipeline to a random node
|
||||
pipelineSlot = (Math.random() * 16384) | 0;
|
||||
}
|
||||
}
|
||||
const _this = this;
|
||||
execPipeline();
|
||||
return this.promise;
|
||||
function execPipeline() {
|
||||
let writePending = (_this.replyPending = _this._queue.length);
|
||||
let node;
|
||||
if (_this.isCluster) {
|
||||
node = {
|
||||
slot: pipelineSlot,
|
||||
redis: _this.redis.connectionPool.nodes.all[_this.preferKey],
|
||||
};
|
||||
}
|
||||
let data = "";
|
||||
let buffers;
|
||||
const stream = {
|
||||
isPipeline: true,
|
||||
destination: _this.isCluster ? node : { redis: _this.redis },
|
||||
write(writable) {
|
||||
if (typeof writable !== "string") {
|
||||
if (!buffers) {
|
||||
buffers = [];
|
||||
}
|
||||
if (data) {
|
||||
buffers.push(Buffer.from(data, "utf8"));
|
||||
data = "";
|
||||
}
|
||||
buffers.push(writable);
|
||||
}
|
||||
else {
|
||||
data += writable;
|
||||
}
|
||||
if (!--writePending) {
|
||||
if (buffers) {
|
||||
if (data) {
|
||||
buffers.push(Buffer.from(data, "utf8"));
|
||||
}
|
||||
stream.destination.redis.stream.write(Buffer.concat(buffers));
|
||||
}
|
||||
else {
|
||||
stream.destination.redis.stream.write(data);
|
||||
}
|
||||
// Reset writePending for resending
|
||||
writePending = _this._queue.length;
|
||||
data = "";
|
||||
buffers = undefined;
|
||||
}
|
||||
},
|
||||
};
|
||||
for (let i = 0; i < _this._queue.length; ++i) {
|
||||
_this.redis.sendCommand(_this._queue[i], stream, node);
|
||||
}
|
||||
return _this.promise;
|
||||
}
|
||||
};
|
||||
232
node_modules/ioredis/built/Redis.d.ts
generated
vendored
Normal file
232
node_modules/ioredis/built/Redis.d.ts
generated
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
/// <reference types="node" />
|
||||
import { EventEmitter } from "events";
|
||||
import Cluster from "./cluster";
|
||||
import Command from "./Command";
|
||||
import { DataHandledable, FlushQueueOptions, Condition } from "./DataHandler";
|
||||
import { RedisOptions } from "./redis/RedisOptions";
|
||||
import ScanStream from "./ScanStream";
|
||||
import { Transaction } from "./transaction";
|
||||
import { Callback, CommandItem, NetStream, ScanStreamOptions, WriteableStream } from "./types";
|
||||
import Commander from "./utils/Commander";
|
||||
import Deque = require("denque");
|
||||
declare type RedisStatus = "wait" | "reconnecting" | "connecting" | "connect" | "ready" | "close" | "end";
|
||||
/**
|
||||
* This is the major component of ioredis.
|
||||
* Use it to connect to a standalone Redis server or Sentinels.
|
||||
*
|
||||
* ```typescript
|
||||
* const redis = new Redis(); // Default port is 6379
|
||||
* async function main() {
|
||||
* redis.set("foo", "bar");
|
||||
* redis.get("foo", (err, result) => {
|
||||
* // `result` should be "bar"
|
||||
* console.log(err, result);
|
||||
* });
|
||||
* // Or use Promise
|
||||
* const result = await redis.get("foo");
|
||||
* }
|
||||
* ```
|
||||
*/
|
||||
declare class Redis extends Commander implements DataHandledable {
|
||||
static Cluster: typeof Cluster;
|
||||
static Command: typeof Command;
|
||||
/**
|
||||
* Default options
|
||||
*/
|
||||
private static defaultOptions;
|
||||
/**
|
||||
* Create a Redis instance.
|
||||
* This is the same as `new Redis()` but is included for compatibility with node-redis.
|
||||
*/
|
||||
static createClient(...args: ConstructorParameters<typeof Redis>): Redis;
|
||||
options: RedisOptions;
|
||||
status: RedisStatus;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
stream: NetStream;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
isCluster: boolean;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
condition: Condition | null;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
commandQueue: Deque<CommandItem>;
|
||||
private connector;
|
||||
private reconnectTimeout;
|
||||
private offlineQueue;
|
||||
private connectionEpoch;
|
||||
private retryAttempts;
|
||||
private manuallyClosing;
|
||||
private socketTimeoutTimer;
|
||||
private _autoPipelines;
|
||||
private _runningAutoPipelines;
|
||||
constructor(port: number, host: string, options: RedisOptions);
|
||||
constructor(path: string, options: RedisOptions);
|
||||
constructor(port: number, options: RedisOptions);
|
||||
constructor(port: number, host: string);
|
||||
constructor(options: RedisOptions);
|
||||
constructor(port: number);
|
||||
constructor(path: string);
|
||||
constructor();
|
||||
get autoPipelineQueueSize(): number;
|
||||
/**
|
||||
* Create a connection to Redis.
|
||||
* This method will be invoked automatically when creating a new Redis instance
|
||||
* unless `lazyConnect: true` is passed.
|
||||
*
|
||||
* When calling this method manually, a Promise is returned, which will
|
||||
* be resolved when the connection status is ready. The promise can reject
|
||||
* if the connection fails, times out, or if Redis is already connecting/connected.
|
||||
*/
|
||||
connect(callback?: Callback<void>): Promise<void>;
|
||||
/**
|
||||
* Disconnect from Redis.
|
||||
*
|
||||
* This method closes the connection immediately,
|
||||
* and may lose some pending replies that haven't written to client.
|
||||
* If you want to wait for the pending replies, use Redis#quit instead.
|
||||
*/
|
||||
disconnect(reconnect?: boolean): void;
|
||||
/**
|
||||
* Disconnect from Redis.
|
||||
*
|
||||
* @deprecated
|
||||
*/
|
||||
end(): void;
|
||||
/**
|
||||
* Create a new instance with the same options as the current one.
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* var redis = new Redis(6380);
|
||||
* var anotherRedis = redis.duplicate();
|
||||
* ```
|
||||
*/
|
||||
duplicate(override?: Partial<RedisOptions>): Redis;
|
||||
/**
|
||||
* Mode of the connection.
|
||||
*
|
||||
* One of `"normal"`, `"subscriber"`, or `"monitor"`. When the connection is
|
||||
* not in `"normal"` mode, certain commands are not allowed.
|
||||
*/
|
||||
get mode(): "normal" | "subscriber" | "monitor";
|
||||
/**
|
||||
* Listen for all requests received by the server in real time.
|
||||
*
|
||||
* This command will create a new connection to Redis and send a
|
||||
* MONITOR command via the new connection in order to avoid disturbing
|
||||
* the current connection.
|
||||
*
|
||||
* @param callback The callback function. If omit, a promise will be returned.
|
||||
* @example
|
||||
* ```js
|
||||
* var redis = new Redis();
|
||||
* redis.monitor(function (err, monitor) {
|
||||
* // Entering monitoring mode.
|
||||
* monitor.on('monitor', function (time, args, source, database) {
|
||||
* console.log(time + ": " + util.inspect(args));
|
||||
* });
|
||||
* });
|
||||
*
|
||||
* // supports promise as well as other commands
|
||||
* redis.monitor().then(function (monitor) {
|
||||
* monitor.on('monitor', function (time, args, source, database) {
|
||||
* console.log(time + ": " + util.inspect(args));
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
monitor(callback?: Callback<Redis>): Promise<Redis>;
|
||||
/**
|
||||
* Send a command to Redis
|
||||
*
|
||||
* This method is used internally and in most cases you should not
|
||||
* use it directly. If you need to send a command that is not supported
|
||||
* by the library, you can use the `call` method:
|
||||
*
|
||||
* ```js
|
||||
* const redis = new Redis();
|
||||
*
|
||||
* redis.call('set', 'foo', 'bar');
|
||||
* // or
|
||||
* redis.call(['set', 'foo', 'bar']);
|
||||
* ```
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
sendCommand(command: Command, stream?: WriteableStream): unknown;
|
||||
private getBlockingTimeoutInMs;
|
||||
private getConfiguredBlockingTimeout;
|
||||
private setSocketTimeout;
|
||||
scanStream(options?: ScanStreamOptions): ScanStream;
|
||||
scanBufferStream(options?: ScanStreamOptions): ScanStream;
|
||||
sscanStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
sscanBufferStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
hscanStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
hscanBufferStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
zscanStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
zscanBufferStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
/**
|
||||
* Emit only when there's at least one listener.
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
silentEmit(eventName: string, arg?: unknown): boolean;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
recoverFromFatalError(_commandError: Error, err: Error, options: FlushQueueOptions): void;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
handleReconnection(err: Error, item: CommandItem): void;
|
||||
/**
|
||||
* Get description of the connection. Used for debugging.
|
||||
*/
|
||||
private _getDescription;
|
||||
private resetCommandQueue;
|
||||
private resetOfflineQueue;
|
||||
private parseOptions;
|
||||
/**
|
||||
* Change instance's status
|
||||
*/
|
||||
private setStatus;
|
||||
private createScanStream;
|
||||
/**
|
||||
* Flush offline queue and command queue with error.
|
||||
*
|
||||
* @param error The error object to send to the commands
|
||||
* @param options options
|
||||
*/
|
||||
private flushQueue;
|
||||
/**
|
||||
* Check whether Redis has finished loading the persistent data and is able to
|
||||
* process commands.
|
||||
*/
|
||||
private _readyCheck;
|
||||
}
|
||||
interface Redis extends EventEmitter {
|
||||
on(event: "message", cb: (channel: string, message: string) => void): this;
|
||||
once(event: "message", cb: (channel: string, message: string) => void): this;
|
||||
on(event: "messageBuffer", cb: (channel: Buffer, message: Buffer) => void): this;
|
||||
once(event: "messageBuffer", cb: (channel: Buffer, message: Buffer) => void): this;
|
||||
on(event: "pmessage", cb: (pattern: string, channel: string, message: string) => void): this;
|
||||
once(event: "pmessage", cb: (pattern: string, channel: string, message: string) => void): this;
|
||||
on(event: "pmessageBuffer", cb: (pattern: string, channel: Buffer, message: Buffer) => void): this;
|
||||
once(event: "pmessageBuffer", cb: (pattern: string, channel: Buffer, message: Buffer) => void): this;
|
||||
on(event: "error", cb: (error: Error) => void): this;
|
||||
once(event: "error", cb: (error: Error) => void): this;
|
||||
on(event: RedisStatus, cb: () => void): this;
|
||||
once(event: RedisStatus, cb: () => void): this;
|
||||
on(event: string | symbol, listener: (...args: any[]) => void): this;
|
||||
once(event: string | symbol, listener: (...args: any[]) => void): this;
|
||||
}
|
||||
interface Redis extends Transaction {
|
||||
}
|
||||
export default Redis;
|
||||
745
node_modules/ioredis/built/Redis.js
generated
vendored
Normal file
745
node_modules/ioredis/built/Redis.js
generated
vendored
Normal file
@@ -0,0 +1,745 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const commands_1 = require("@ioredis/commands");
|
||||
const events_1 = require("events");
|
||||
const standard_as_callback_1 = require("standard-as-callback");
|
||||
const cluster_1 = require("./cluster");
|
||||
const Command_1 = require("./Command");
|
||||
const connectors_1 = require("./connectors");
|
||||
const SentinelConnector_1 = require("./connectors/SentinelConnector");
|
||||
const eventHandler = require("./redis/event_handler");
|
||||
const RedisOptions_1 = require("./redis/RedisOptions");
|
||||
const ScanStream_1 = require("./ScanStream");
|
||||
const transaction_1 = require("./transaction");
|
||||
const utils_1 = require("./utils");
|
||||
const applyMixin_1 = require("./utils/applyMixin");
|
||||
const Commander_1 = require("./utils/Commander");
|
||||
const lodash_1 = require("./utils/lodash");
|
||||
const Deque = require("denque");
|
||||
const debug = (0, utils_1.Debug)("redis");
|
||||
/**
|
||||
* This is the major component of ioredis.
|
||||
* Use it to connect to a standalone Redis server or Sentinels.
|
||||
*
|
||||
* ```typescript
|
||||
* const redis = new Redis(); // Default port is 6379
|
||||
* async function main() {
|
||||
* redis.set("foo", "bar");
|
||||
* redis.get("foo", (err, result) => {
|
||||
* // `result` should be "bar"
|
||||
* console.log(err, result);
|
||||
* });
|
||||
* // Or use Promise
|
||||
* const result = await redis.get("foo");
|
||||
* }
|
||||
* ```
|
||||
*/
|
||||
class Redis extends Commander_1.default {
|
||||
constructor(arg1, arg2, arg3) {
|
||||
super();
|
||||
this.status = "wait";
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
this.isCluster = false;
|
||||
this.reconnectTimeout = null;
|
||||
this.connectionEpoch = 0;
|
||||
this.retryAttempts = 0;
|
||||
this.manuallyClosing = false;
|
||||
// Prepare autopipelines structures
|
||||
this._autoPipelines = new Map();
|
||||
this._runningAutoPipelines = new Set();
|
||||
this.parseOptions(arg1, arg2, arg3);
|
||||
events_1.EventEmitter.call(this);
|
||||
this.resetCommandQueue();
|
||||
this.resetOfflineQueue();
|
||||
if (this.options.Connector) {
|
||||
this.connector = new this.options.Connector(this.options);
|
||||
}
|
||||
else if (this.options.sentinels) {
|
||||
const sentinelConnector = new SentinelConnector_1.default(this.options);
|
||||
sentinelConnector.emitter = this;
|
||||
this.connector = sentinelConnector;
|
||||
}
|
||||
else {
|
||||
this.connector = new connectors_1.StandaloneConnector(this.options);
|
||||
}
|
||||
if (this.options.scripts) {
|
||||
Object.entries(this.options.scripts).forEach(([name, definition]) => {
|
||||
this.defineCommand(name, definition);
|
||||
});
|
||||
}
|
||||
// end(or wait) -> connecting -> connect -> ready -> end
|
||||
if (this.options.lazyConnect) {
|
||||
this.setStatus("wait");
|
||||
}
|
||||
else {
|
||||
this.connect().catch(lodash_1.noop);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Create a Redis instance.
|
||||
* This is the same as `new Redis()` but is included for compatibility with node-redis.
|
||||
*/
|
||||
static createClient(...args) {
|
||||
return new Redis(...args);
|
||||
}
|
||||
get autoPipelineQueueSize() {
|
||||
let queued = 0;
|
||||
for (const pipeline of this._autoPipelines.values()) {
|
||||
queued += pipeline.length;
|
||||
}
|
||||
return queued;
|
||||
}
|
||||
/**
|
||||
* Create a connection to Redis.
|
||||
* This method will be invoked automatically when creating a new Redis instance
|
||||
* unless `lazyConnect: true` is passed.
|
||||
*
|
||||
* When calling this method manually, a Promise is returned, which will
|
||||
* be resolved when the connection status is ready. The promise can reject
|
||||
* if the connection fails, times out, or if Redis is already connecting/connected.
|
||||
*/
|
||||
connect(callback) {
|
||||
const promise = new Promise((resolve, reject) => {
|
||||
if (this.status === "connecting" ||
|
||||
this.status === "connect" ||
|
||||
this.status === "ready") {
|
||||
reject(new Error("Redis is already connecting/connected"));
|
||||
return;
|
||||
}
|
||||
this.connectionEpoch += 1;
|
||||
this.setStatus("connecting");
|
||||
const { options } = this;
|
||||
this.condition = {
|
||||
select: options.db,
|
||||
auth: options.username
|
||||
? [options.username, options.password]
|
||||
: options.password,
|
||||
subscriber: false,
|
||||
};
|
||||
const _this = this;
|
||||
(0, standard_as_callback_1.default)(this.connector.connect(function (type, err) {
|
||||
_this.silentEmit(type, err);
|
||||
}), function (err, stream) {
|
||||
if (err) {
|
||||
_this.flushQueue(err);
|
||||
_this.silentEmit("error", err);
|
||||
reject(err);
|
||||
_this.setStatus("end");
|
||||
return;
|
||||
}
|
||||
let CONNECT_EVENT = options.tls ? "secureConnect" : "connect";
|
||||
if ("sentinels" in options &&
|
||||
options.sentinels &&
|
||||
!options.enableTLSForSentinelMode) {
|
||||
CONNECT_EVENT = "connect";
|
||||
}
|
||||
_this.stream = stream;
|
||||
if (options.noDelay) {
|
||||
stream.setNoDelay(true);
|
||||
}
|
||||
// Node ignores setKeepAlive before connect, therefore we wait for the event:
|
||||
// https://github.com/nodejs/node/issues/31663
|
||||
if (typeof options.keepAlive === "number") {
|
||||
if (stream.connecting) {
|
||||
stream.once(CONNECT_EVENT, () => {
|
||||
stream.setKeepAlive(true, options.keepAlive);
|
||||
});
|
||||
}
|
||||
else {
|
||||
stream.setKeepAlive(true, options.keepAlive);
|
||||
}
|
||||
}
|
||||
if (stream.connecting) {
|
||||
stream.once(CONNECT_EVENT, eventHandler.connectHandler(_this));
|
||||
if (options.connectTimeout) {
|
||||
/*
|
||||
* Typically, Socket#setTimeout(0) will clear the timer
|
||||
* set before. However, in some platforms (Electron 3.x~4.x),
|
||||
* the timer will not be cleared. So we introduce a variable here.
|
||||
*
|
||||
* See https://github.com/electron/electron/issues/14915
|
||||
*/
|
||||
let connectTimeoutCleared = false;
|
||||
stream.setTimeout(options.connectTimeout, function () {
|
||||
if (connectTimeoutCleared) {
|
||||
return;
|
||||
}
|
||||
stream.setTimeout(0);
|
||||
stream.destroy();
|
||||
const err = new Error("connect ETIMEDOUT");
|
||||
// @ts-expect-error
|
||||
err.errorno = "ETIMEDOUT";
|
||||
// @ts-expect-error
|
||||
err.code = "ETIMEDOUT";
|
||||
// @ts-expect-error
|
||||
err.syscall = "connect";
|
||||
eventHandler.errorHandler(_this)(err);
|
||||
});
|
||||
stream.once(CONNECT_EVENT, function () {
|
||||
connectTimeoutCleared = true;
|
||||
stream.setTimeout(0);
|
||||
});
|
||||
}
|
||||
}
|
||||
else if (stream.destroyed) {
|
||||
const firstError = _this.connector.firstError;
|
||||
if (firstError) {
|
||||
process.nextTick(() => {
|
||||
eventHandler.errorHandler(_this)(firstError);
|
||||
});
|
||||
}
|
||||
process.nextTick(eventHandler.closeHandler(_this));
|
||||
}
|
||||
else {
|
||||
process.nextTick(eventHandler.connectHandler(_this));
|
||||
}
|
||||
if (!stream.destroyed) {
|
||||
stream.once("error", eventHandler.errorHandler(_this));
|
||||
stream.once("close", eventHandler.closeHandler(_this));
|
||||
}
|
||||
const connectionReadyHandler = function () {
|
||||
_this.removeListener("close", connectionCloseHandler);
|
||||
resolve();
|
||||
};
|
||||
var connectionCloseHandler = function () {
|
||||
_this.removeListener("ready", connectionReadyHandler);
|
||||
reject(new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG));
|
||||
};
|
||||
_this.once("ready", connectionReadyHandler);
|
||||
_this.once("close", connectionCloseHandler);
|
||||
});
|
||||
});
|
||||
return (0, standard_as_callback_1.default)(promise, callback);
|
||||
}
|
||||
/**
|
||||
* Disconnect from Redis.
|
||||
*
|
||||
* This method closes the connection immediately,
|
||||
* and may lose some pending replies that haven't written to client.
|
||||
* If you want to wait for the pending replies, use Redis#quit instead.
|
||||
*/
|
||||
disconnect(reconnect = false) {
|
||||
if (!reconnect) {
|
||||
this.manuallyClosing = true;
|
||||
}
|
||||
if (this.reconnectTimeout && !reconnect) {
|
||||
clearTimeout(this.reconnectTimeout);
|
||||
this.reconnectTimeout = null;
|
||||
}
|
||||
if (this.status === "wait") {
|
||||
eventHandler.closeHandler(this)();
|
||||
}
|
||||
else {
|
||||
this.connector.disconnect();
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Disconnect from Redis.
|
||||
*
|
||||
* @deprecated
|
||||
*/
|
||||
end() {
|
||||
this.disconnect();
|
||||
}
|
||||
/**
|
||||
* Create a new instance with the same options as the current one.
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* var redis = new Redis(6380);
|
||||
* var anotherRedis = redis.duplicate();
|
||||
* ```
|
||||
*/
|
||||
duplicate(override) {
|
||||
return new Redis({ ...this.options, ...override });
|
||||
}
|
||||
/**
|
||||
* Mode of the connection.
|
||||
*
|
||||
* One of `"normal"`, `"subscriber"`, or `"monitor"`. When the connection is
|
||||
* not in `"normal"` mode, certain commands are not allowed.
|
||||
*/
|
||||
get mode() {
|
||||
var _a;
|
||||
return this.options.monitor
|
||||
? "monitor"
|
||||
: ((_a = this.condition) === null || _a === void 0 ? void 0 : _a.subscriber)
|
||||
? "subscriber"
|
||||
: "normal";
|
||||
}
|
||||
/**
|
||||
* Listen for all requests received by the server in real time.
|
||||
*
|
||||
* This command will create a new connection to Redis and send a
|
||||
* MONITOR command via the new connection in order to avoid disturbing
|
||||
* the current connection.
|
||||
*
|
||||
* @param callback The callback function. If omit, a promise will be returned.
|
||||
* @example
|
||||
* ```js
|
||||
* var redis = new Redis();
|
||||
* redis.monitor(function (err, monitor) {
|
||||
* // Entering monitoring mode.
|
||||
* monitor.on('monitor', function (time, args, source, database) {
|
||||
* console.log(time + ": " + util.inspect(args));
|
||||
* });
|
||||
* });
|
||||
*
|
||||
* // supports promise as well as other commands
|
||||
* redis.monitor().then(function (monitor) {
|
||||
* monitor.on('monitor', function (time, args, source, database) {
|
||||
* console.log(time + ": " + util.inspect(args));
|
||||
* });
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
monitor(callback) {
|
||||
const monitorInstance = this.duplicate({
|
||||
monitor: true,
|
||||
lazyConnect: false,
|
||||
});
|
||||
return (0, standard_as_callback_1.default)(new Promise(function (resolve, reject) {
|
||||
monitorInstance.once("error", reject);
|
||||
monitorInstance.once("monitoring", function () {
|
||||
resolve(monitorInstance);
|
||||
});
|
||||
}), callback);
|
||||
}
|
||||
/**
|
||||
* Send a command to Redis
|
||||
*
|
||||
* This method is used internally and in most cases you should not
|
||||
* use it directly. If you need to send a command that is not supported
|
||||
* by the library, you can use the `call` method:
|
||||
*
|
||||
* ```js
|
||||
* const redis = new Redis();
|
||||
*
|
||||
* redis.call('set', 'foo', 'bar');
|
||||
* // or
|
||||
* redis.call(['set', 'foo', 'bar']);
|
||||
* ```
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
sendCommand(command, stream) {
|
||||
var _a, _b;
|
||||
if (this.status === "wait") {
|
||||
this.connect().catch(lodash_1.noop);
|
||||
}
|
||||
if (this.status === "end") {
|
||||
command.reject(new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG));
|
||||
return command.promise;
|
||||
}
|
||||
if (((_a = this.condition) === null || _a === void 0 ? void 0 : _a.subscriber) &&
|
||||
!Command_1.default.checkFlag("VALID_IN_SUBSCRIBER_MODE", command.name)) {
|
||||
command.reject(new Error("Connection in subscriber mode, only subscriber commands may be used"));
|
||||
return command.promise;
|
||||
}
|
||||
if (typeof this.options.commandTimeout === "number") {
|
||||
command.setTimeout(this.options.commandTimeout);
|
||||
}
|
||||
const blockingTimeout = this.getBlockingTimeoutInMs(command);
|
||||
let writable = this.status === "ready" ||
|
||||
(!stream &&
|
||||
this.status === "connect" &&
|
||||
(0, commands_1.exists)(command.name, { caseInsensitive: true }) &&
|
||||
((0, commands_1.hasFlag)(command.name, "loading", { nameCaseInsensitive: true }) ||
|
||||
Command_1.default.checkFlag("HANDSHAKE_COMMANDS", command.name)));
|
||||
if (!this.stream) {
|
||||
writable = false;
|
||||
}
|
||||
else if (!this.stream.writable) {
|
||||
writable = false;
|
||||
// @ts-expect-error
|
||||
}
|
||||
else if (this.stream._writableState && this.stream._writableState.ended) {
|
||||
// TODO: We should be able to remove this as the PR has already been merged.
|
||||
// https://github.com/iojs/io.js/pull/1217
|
||||
writable = false;
|
||||
}
|
||||
if (!writable) {
|
||||
if (!this.options.enableOfflineQueue) {
|
||||
command.reject(new Error("Stream isn't writeable and enableOfflineQueue options is false"));
|
||||
return command.promise;
|
||||
}
|
||||
if (command.name === "quit" && this.offlineQueue.length === 0) {
|
||||
this.disconnect();
|
||||
command.resolve(Buffer.from("OK"));
|
||||
return command.promise;
|
||||
}
|
||||
// @ts-expect-error
|
||||
if (debug.enabled) {
|
||||
debug("queue command[%s]: %d -> %s(%o)", this._getDescription(), this.condition.select, command.name, command.args);
|
||||
}
|
||||
this.offlineQueue.push({
|
||||
command: command,
|
||||
stream: stream,
|
||||
select: this.condition.select,
|
||||
});
|
||||
// For blocking commands in the offline queue, arm a client-side timeout
|
||||
// only when blockingTimeout is configured. Without this option, queued
|
||||
// blocking commands may wait indefinitely on a dead connection.
|
||||
if (Command_1.default.checkFlag("BLOCKING_COMMANDS", command.name)) {
|
||||
const offlineTimeout = this.getConfiguredBlockingTimeout();
|
||||
if (offlineTimeout !== undefined) {
|
||||
command.setBlockingTimeout(offlineTimeout);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
// @ts-expect-error
|
||||
if (debug.enabled) {
|
||||
debug("write command[%s]: %d -> %s(%o)", this._getDescription(), (_b = this.condition) === null || _b === void 0 ? void 0 : _b.select, command.name, command.args);
|
||||
}
|
||||
if (stream) {
|
||||
if ("isPipeline" in stream && stream.isPipeline) {
|
||||
stream.write(command.toWritable(stream.destination.redis.stream));
|
||||
}
|
||||
else {
|
||||
stream.write(command.toWritable(stream));
|
||||
}
|
||||
}
|
||||
else {
|
||||
this.stream.write(command.toWritable(this.stream));
|
||||
}
|
||||
this.commandQueue.push({
|
||||
command: command,
|
||||
stream: stream,
|
||||
select: this.condition.select,
|
||||
});
|
||||
if (blockingTimeout !== undefined) {
|
||||
command.setBlockingTimeout(blockingTimeout);
|
||||
}
|
||||
if (Command_1.default.checkFlag("WILL_DISCONNECT", command.name)) {
|
||||
this.manuallyClosing = true;
|
||||
}
|
||||
if (this.options.socketTimeout !== undefined && this.socketTimeoutTimer === undefined) {
|
||||
this.setSocketTimeout();
|
||||
}
|
||||
}
|
||||
if (command.name === "select" && (0, utils_1.isInt)(command.args[0])) {
|
||||
const db = parseInt(command.args[0], 10);
|
||||
if (this.condition.select !== db) {
|
||||
this.condition.select = db;
|
||||
this.emit("select", db);
|
||||
debug("switch to db [%d]", this.condition.select);
|
||||
}
|
||||
}
|
||||
return command.promise;
|
||||
}
|
||||
getBlockingTimeoutInMs(command) {
|
||||
var _a;
|
||||
if (!Command_1.default.checkFlag("BLOCKING_COMMANDS", command.name)) {
|
||||
return undefined;
|
||||
}
|
||||
// Feature is opt-in: only enabled when blockingTimeout is set to a positive number
|
||||
const configuredTimeout = this.getConfiguredBlockingTimeout();
|
||||
if (configuredTimeout === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
const timeout = command.extractBlockingTimeout();
|
||||
if (typeof timeout === "number") {
|
||||
if (timeout > 0) {
|
||||
// Finite timeout from command args - add grace period
|
||||
return timeout + ((_a = this.options.blockingTimeoutGrace) !== null && _a !== void 0 ? _a : RedisOptions_1.DEFAULT_REDIS_OPTIONS.blockingTimeoutGrace);
|
||||
}
|
||||
// Command has timeout=0 (block forever), use blockingTimeout option as safety net
|
||||
return configuredTimeout;
|
||||
}
|
||||
if (timeout === null) {
|
||||
// No BLOCK option found (e.g., XREAD without BLOCK), use blockingTimeout as safety net
|
||||
return configuredTimeout;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
getConfiguredBlockingTimeout() {
|
||||
if (typeof this.options.blockingTimeout === "number" &&
|
||||
this.options.blockingTimeout > 0) {
|
||||
return this.options.blockingTimeout;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
setSocketTimeout() {
|
||||
this.socketTimeoutTimer = setTimeout(() => {
|
||||
this.stream.destroy(new Error(`Socket timeout. Expecting data, but didn't receive any in ${this.options.socketTimeout}ms.`));
|
||||
this.socketTimeoutTimer = undefined;
|
||||
}, this.options.socketTimeout);
|
||||
// this handler must run after the "data" handler in "DataHandler"
|
||||
// so that `this.commandQueue.length` will be updated
|
||||
this.stream.once("data", () => {
|
||||
clearTimeout(this.socketTimeoutTimer);
|
||||
this.socketTimeoutTimer = undefined;
|
||||
if (this.commandQueue.length === 0)
|
||||
return;
|
||||
this.setSocketTimeout();
|
||||
});
|
||||
}
|
||||
scanStream(options) {
|
||||
return this.createScanStream("scan", { options });
|
||||
}
|
||||
scanBufferStream(options) {
|
||||
return this.createScanStream("scanBuffer", { options });
|
||||
}
|
||||
sscanStream(key, options) {
|
||||
return this.createScanStream("sscan", { key, options });
|
||||
}
|
||||
sscanBufferStream(key, options) {
|
||||
return this.createScanStream("sscanBuffer", { key, options });
|
||||
}
|
||||
hscanStream(key, options) {
|
||||
return this.createScanStream("hscan", { key, options });
|
||||
}
|
||||
hscanBufferStream(key, options) {
|
||||
return this.createScanStream("hscanBuffer", { key, options });
|
||||
}
|
||||
zscanStream(key, options) {
|
||||
return this.createScanStream("zscan", { key, options });
|
||||
}
|
||||
zscanBufferStream(key, options) {
|
||||
return this.createScanStream("zscanBuffer", { key, options });
|
||||
}
|
||||
/**
|
||||
* Emit only when there's at least one listener.
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
silentEmit(eventName, arg) {
|
||||
let error;
|
||||
if (eventName === "error") {
|
||||
error = arg;
|
||||
if (this.status === "end") {
|
||||
return;
|
||||
}
|
||||
if (this.manuallyClosing) {
|
||||
// ignore connection related errors when manually disconnecting
|
||||
if (error instanceof Error &&
|
||||
(error.message === utils_1.CONNECTION_CLOSED_ERROR_MSG ||
|
||||
// @ts-expect-error
|
||||
error.syscall === "connect" ||
|
||||
// @ts-expect-error
|
||||
error.syscall === "read")) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (this.listeners(eventName).length > 0) {
|
||||
return this.emit.apply(this, arguments);
|
||||
}
|
||||
if (error && error instanceof Error) {
|
||||
console.error("[ioredis] Unhandled error event:", error.stack);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
recoverFromFatalError(_commandError, err, options) {
|
||||
this.flushQueue(err, options);
|
||||
this.silentEmit("error", err);
|
||||
this.disconnect(true);
|
||||
}
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
handleReconnection(err, item) {
|
||||
var _a;
|
||||
let needReconnect = false;
|
||||
if (this.options.reconnectOnError &&
|
||||
!Command_1.default.checkFlag("IGNORE_RECONNECT_ON_ERROR", item.command.name)) {
|
||||
needReconnect = this.options.reconnectOnError(err);
|
||||
}
|
||||
switch (needReconnect) {
|
||||
case 1:
|
||||
case true:
|
||||
if (this.status !== "reconnecting") {
|
||||
this.disconnect(true);
|
||||
}
|
||||
item.command.reject(err);
|
||||
break;
|
||||
case 2:
|
||||
if (this.status !== "reconnecting") {
|
||||
this.disconnect(true);
|
||||
}
|
||||
if (((_a = this.condition) === null || _a === void 0 ? void 0 : _a.select) !== item.select &&
|
||||
item.command.name !== "select") {
|
||||
this.select(item.select);
|
||||
}
|
||||
// TODO
|
||||
// @ts-expect-error
|
||||
this.sendCommand(item.command);
|
||||
break;
|
||||
default:
|
||||
item.command.reject(err);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Get description of the connection. Used for debugging.
|
||||
*/
|
||||
_getDescription() {
|
||||
let description;
|
||||
if ("path" in this.options && this.options.path) {
|
||||
description = this.options.path;
|
||||
}
|
||||
else if (this.stream &&
|
||||
this.stream.remoteAddress &&
|
||||
this.stream.remotePort) {
|
||||
description = this.stream.remoteAddress + ":" + this.stream.remotePort;
|
||||
}
|
||||
else if ("host" in this.options && this.options.host) {
|
||||
description = this.options.host + ":" + this.options.port;
|
||||
}
|
||||
else {
|
||||
// Unexpected
|
||||
description = "";
|
||||
}
|
||||
if (this.options.connectionName) {
|
||||
description += ` (${this.options.connectionName})`;
|
||||
}
|
||||
return description;
|
||||
}
|
||||
resetCommandQueue() {
|
||||
this.commandQueue = new Deque();
|
||||
}
|
||||
resetOfflineQueue() {
|
||||
this.offlineQueue = new Deque();
|
||||
}
|
||||
parseOptions(...args) {
|
||||
const options = {};
|
||||
let isTls = false;
|
||||
for (let i = 0; i < args.length; ++i) {
|
||||
const arg = args[i];
|
||||
if (arg === null || typeof arg === "undefined") {
|
||||
continue;
|
||||
}
|
||||
if (typeof arg === "object") {
|
||||
(0, lodash_1.defaults)(options, arg);
|
||||
}
|
||||
else if (typeof arg === "string") {
|
||||
(0, lodash_1.defaults)(options, (0, utils_1.parseURL)(arg));
|
||||
if (arg.startsWith("rediss://")) {
|
||||
isTls = true;
|
||||
}
|
||||
}
|
||||
else if (typeof arg === "number") {
|
||||
options.port = arg;
|
||||
}
|
||||
else {
|
||||
throw new Error("Invalid argument " + arg);
|
||||
}
|
||||
}
|
||||
if (isTls) {
|
||||
(0, lodash_1.defaults)(options, { tls: true });
|
||||
}
|
||||
(0, lodash_1.defaults)(options, Redis.defaultOptions);
|
||||
if (typeof options.port === "string") {
|
||||
options.port = parseInt(options.port, 10);
|
||||
}
|
||||
if (typeof options.db === "string") {
|
||||
options.db = parseInt(options.db, 10);
|
||||
}
|
||||
// @ts-expect-error
|
||||
this.options = (0, utils_1.resolveTLSProfile)(options);
|
||||
}
|
||||
/**
|
||||
* Change instance's status
|
||||
*/
|
||||
setStatus(status, arg) {
|
||||
// @ts-expect-error
|
||||
if (debug.enabled) {
|
||||
debug("status[%s]: %s -> %s", this._getDescription(), this.status || "[empty]", status);
|
||||
}
|
||||
this.status = status;
|
||||
process.nextTick(this.emit.bind(this, status, arg));
|
||||
}
|
||||
createScanStream(command, { key, options = {} }) {
|
||||
return new ScanStream_1.default({
|
||||
objectMode: true,
|
||||
key: key,
|
||||
redis: this,
|
||||
command: command,
|
||||
...options,
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Flush offline queue and command queue with error.
|
||||
*
|
||||
* @param error The error object to send to the commands
|
||||
* @param options options
|
||||
*/
|
||||
flushQueue(error, options) {
|
||||
options = (0, lodash_1.defaults)({}, options, {
|
||||
offlineQueue: true,
|
||||
commandQueue: true,
|
||||
});
|
||||
let item;
|
||||
if (options.offlineQueue) {
|
||||
while ((item = this.offlineQueue.shift())) {
|
||||
item.command.reject(error);
|
||||
}
|
||||
}
|
||||
if (options.commandQueue) {
|
||||
if (this.commandQueue.length > 0) {
|
||||
if (this.stream) {
|
||||
this.stream.removeAllListeners("data");
|
||||
}
|
||||
while ((item = this.commandQueue.shift())) {
|
||||
item.command.reject(error);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Check whether Redis has finished loading the persistent data and is able to
|
||||
* process commands.
|
||||
*/
|
||||
_readyCheck(callback) {
|
||||
const _this = this;
|
||||
this.info(function (err, res) {
|
||||
if (err) {
|
||||
if (err.message && err.message.includes("NOPERM")) {
|
||||
console.warn(`Skipping the ready check because INFO command fails: "${err.message}". You can disable ready check with "enableReadyCheck". More: https://github.com/luin/ioredis/wiki/Disable-ready-check.`);
|
||||
return callback(null, {});
|
||||
}
|
||||
return callback(err);
|
||||
}
|
||||
if (typeof res !== "string") {
|
||||
return callback(null, res);
|
||||
}
|
||||
const info = {};
|
||||
const lines = res.split("\r\n");
|
||||
for (let i = 0; i < lines.length; ++i) {
|
||||
const [fieldName, ...fieldValueParts] = lines[i].split(":");
|
||||
const fieldValue = fieldValueParts.join(":");
|
||||
if (fieldValue) {
|
||||
info[fieldName] = fieldValue;
|
||||
}
|
||||
}
|
||||
if (!info.loading || info.loading === "0") {
|
||||
callback(null, info);
|
||||
}
|
||||
else {
|
||||
const loadingEtaMs = (info.loading_eta_seconds || 1) * 1000;
|
||||
const retryTime = _this.options.maxLoadingRetryTime &&
|
||||
_this.options.maxLoadingRetryTime < loadingEtaMs
|
||||
? _this.options.maxLoadingRetryTime
|
||||
: loadingEtaMs;
|
||||
debug("Redis server still loading, trying again in " + retryTime + "ms");
|
||||
setTimeout(function () {
|
||||
_this._readyCheck(callback);
|
||||
}, retryTime);
|
||||
}
|
||||
}).catch(lodash_1.noop);
|
||||
}
|
||||
}
|
||||
Redis.Cluster = cluster_1.default;
|
||||
Redis.Command = Command_1.default;
|
||||
/**
|
||||
* Default options
|
||||
*/
|
||||
Redis.defaultOptions = RedisOptions_1.DEFAULT_REDIS_OPTIONS;
|
||||
(0, applyMixin_1.default)(Redis, events_1.EventEmitter);
|
||||
(0, transaction_1.addTransactionSupport)(Redis.prototype);
|
||||
exports.default = Redis;
|
||||
23
node_modules/ioredis/built/ScanStream.d.ts
generated
vendored
Normal file
23
node_modules/ioredis/built/ScanStream.d.ts
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
/// <reference types="node" />
|
||||
import { Readable, ReadableOptions } from "stream";
|
||||
interface Options extends ReadableOptions {
|
||||
key?: string;
|
||||
match?: string;
|
||||
type?: string;
|
||||
command: string;
|
||||
redis: any;
|
||||
count?: string | number;
|
||||
noValues?: boolean;
|
||||
}
|
||||
/**
|
||||
* Convenient class to convert the process of scanning keys to a readable stream.
|
||||
*/
|
||||
export default class ScanStream extends Readable {
|
||||
private opt;
|
||||
private _redisCursor;
|
||||
private _redisDrained;
|
||||
constructor(opt: Options);
|
||||
_read(): void;
|
||||
close(): void;
|
||||
}
|
||||
export {};
|
||||
51
node_modules/ioredis/built/ScanStream.js
generated
vendored
Normal file
51
node_modules/ioredis/built/ScanStream.js
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const stream_1 = require("stream");
|
||||
/**
|
||||
* Convenient class to convert the process of scanning keys to a readable stream.
|
||||
*/
|
||||
class ScanStream extends stream_1.Readable {
|
||||
constructor(opt) {
|
||||
super(opt);
|
||||
this.opt = opt;
|
||||
this._redisCursor = "0";
|
||||
this._redisDrained = false;
|
||||
}
|
||||
_read() {
|
||||
if (this._redisDrained) {
|
||||
this.push(null);
|
||||
return;
|
||||
}
|
||||
const args = [this._redisCursor];
|
||||
if (this.opt.key) {
|
||||
args.unshift(this.opt.key);
|
||||
}
|
||||
if (this.opt.match) {
|
||||
args.push("MATCH", this.opt.match);
|
||||
}
|
||||
if (this.opt.type) {
|
||||
args.push("TYPE", this.opt.type);
|
||||
}
|
||||
if (this.opt.count) {
|
||||
args.push("COUNT", String(this.opt.count));
|
||||
}
|
||||
if (this.opt.noValues) {
|
||||
args.push("NOVALUES");
|
||||
}
|
||||
this.opt.redis[this.opt.command](args, (err, res) => {
|
||||
if (err) {
|
||||
this.emit("error", err);
|
||||
return;
|
||||
}
|
||||
this._redisCursor = res[0] instanceof Buffer ? res[0].toString() : res[0];
|
||||
if (this._redisCursor === "0") {
|
||||
this._redisDrained = true;
|
||||
}
|
||||
this.push(res[1]);
|
||||
});
|
||||
}
|
||||
close() {
|
||||
this._redisDrained = true;
|
||||
}
|
||||
}
|
||||
exports.default = ScanStream;
|
||||
11
node_modules/ioredis/built/Script.d.ts
generated
vendored
Normal file
11
node_modules/ioredis/built/Script.d.ts
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
import { Callback } from "./types";
|
||||
export default class Script {
|
||||
private lua;
|
||||
private numberOfKeys;
|
||||
private keyPrefix;
|
||||
private readOnly;
|
||||
private sha;
|
||||
private Command;
|
||||
constructor(lua: string, numberOfKeys?: number | null, keyPrefix?: string, readOnly?: boolean);
|
||||
execute(container: any, args: any[], options: any, callback?: Callback): any;
|
||||
}
|
||||
62
node_modules/ioredis/built/Script.js
generated
vendored
Normal file
62
node_modules/ioredis/built/Script.js
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const crypto_1 = require("crypto");
|
||||
const Command_1 = require("./Command");
|
||||
const standard_as_callback_1 = require("standard-as-callback");
|
||||
class Script {
|
||||
constructor(lua, numberOfKeys = null, keyPrefix = "", readOnly = false) {
|
||||
this.lua = lua;
|
||||
this.numberOfKeys = numberOfKeys;
|
||||
this.keyPrefix = keyPrefix;
|
||||
this.readOnly = readOnly;
|
||||
this.sha = (0, crypto_1.createHash)("sha1").update(lua).digest("hex");
|
||||
const sha = this.sha;
|
||||
const socketHasScriptLoaded = new WeakSet();
|
||||
this.Command = class CustomScriptCommand extends Command_1.default {
|
||||
toWritable(socket) {
|
||||
const origReject = this.reject;
|
||||
this.reject = (err) => {
|
||||
if (err.message.indexOf("NOSCRIPT") !== -1) {
|
||||
socketHasScriptLoaded.delete(socket);
|
||||
}
|
||||
origReject.call(this, err);
|
||||
};
|
||||
if (!socketHasScriptLoaded.has(socket)) {
|
||||
socketHasScriptLoaded.add(socket);
|
||||
this.name = "eval";
|
||||
this.args[0] = lua;
|
||||
}
|
||||
else if (this.name === "eval") {
|
||||
this.name = "evalsha";
|
||||
this.args[0] = sha;
|
||||
}
|
||||
return super.toWritable(socket);
|
||||
}
|
||||
};
|
||||
}
|
||||
execute(container, args, options, callback) {
|
||||
if (typeof this.numberOfKeys === "number") {
|
||||
args.unshift(this.numberOfKeys);
|
||||
}
|
||||
if (this.keyPrefix) {
|
||||
options.keyPrefix = this.keyPrefix;
|
||||
}
|
||||
if (this.readOnly) {
|
||||
options.readOnly = true;
|
||||
}
|
||||
const evalsha = new this.Command("evalsha", [this.sha, ...args], options);
|
||||
evalsha.promise = evalsha.promise.catch((err) => {
|
||||
if (err.message.indexOf("NOSCRIPT") === -1) {
|
||||
throw err;
|
||||
}
|
||||
// Resend the same custom evalsha command that gets transformed
|
||||
// to an eval in case it's not loaded yet on the connection.
|
||||
const resend = new this.Command("evalsha", [this.sha, ...args], options);
|
||||
const client = container.isPipeline ? container.redis : container;
|
||||
return client.sendCommand(resend);
|
||||
});
|
||||
(0, standard_as_callback_1.default)(evalsha.promise, callback);
|
||||
return container.sendCommand(evalsha);
|
||||
}
|
||||
}
|
||||
exports.default = Script;
|
||||
14
node_modules/ioredis/built/SubscriptionSet.d.ts
generated
vendored
Normal file
14
node_modules/ioredis/built/SubscriptionSet.d.ts
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
import { CommandNameFlags } from "./Command";
|
||||
declare type AddSet = CommandNameFlags["ENTER_SUBSCRIBER_MODE"][number];
|
||||
declare type DelSet = CommandNameFlags["EXIT_SUBSCRIBER_MODE"][number];
|
||||
/**
|
||||
* Tiny class to simplify dealing with subscription set
|
||||
*/
|
||||
export default class SubscriptionSet {
|
||||
private set;
|
||||
add(set: AddSet, channel: string): void;
|
||||
del(set: DelSet, channel: string): void;
|
||||
channels(set: AddSet | DelSet): string[];
|
||||
isEmpty(): boolean;
|
||||
}
|
||||
export {};
|
||||
41
node_modules/ioredis/built/SubscriptionSet.js
generated
vendored
Normal file
41
node_modules/ioredis/built/SubscriptionSet.js
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
/**
|
||||
* Tiny class to simplify dealing with subscription set
|
||||
*/
|
||||
class SubscriptionSet {
|
||||
constructor() {
|
||||
this.set = {
|
||||
subscribe: {},
|
||||
psubscribe: {},
|
||||
ssubscribe: {},
|
||||
};
|
||||
}
|
||||
add(set, channel) {
|
||||
this.set[mapSet(set)][channel] = true;
|
||||
}
|
||||
del(set, channel) {
|
||||
delete this.set[mapSet(set)][channel];
|
||||
}
|
||||
channels(set) {
|
||||
return Object.keys(this.set[mapSet(set)]);
|
||||
}
|
||||
isEmpty() {
|
||||
return (this.channels("subscribe").length === 0 &&
|
||||
this.channels("psubscribe").length === 0 &&
|
||||
this.channels("ssubscribe").length === 0);
|
||||
}
|
||||
}
|
||||
exports.default = SubscriptionSet;
|
||||
function mapSet(set) {
|
||||
if (set === "unsubscribe") {
|
||||
return "subscribe";
|
||||
}
|
||||
if (set === "punsubscribe") {
|
||||
return "psubscribe";
|
||||
}
|
||||
if (set === "sunsubscribe") {
|
||||
return "ssubscribe";
|
||||
}
|
||||
return set;
|
||||
}
|
||||
8
node_modules/ioredis/built/autoPipelining.d.ts
generated
vendored
Normal file
8
node_modules/ioredis/built/autoPipelining.d.ts
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
/// <reference types="node" />
|
||||
import { ArgumentType } from "./Command";
|
||||
export declare const kExec: unique symbol;
|
||||
export declare const kCallbacks: unique symbol;
|
||||
export declare const notAllowedAutoPipelineCommands: string[];
|
||||
export declare function shouldUseAutoPipelining(client: any, functionName: string, commandName: string): boolean;
|
||||
export declare function getFirstValueInFlattenedArray(args: ArgumentType[]): string | Buffer | number | null | undefined;
|
||||
export declare function executeWithAutoPipelining(client: any, functionName: string, commandName: string, args: ArgumentType[], callback: any): Promise<unknown>;
|
||||
167
node_modules/ioredis/built/autoPipelining.js
generated
vendored
Normal file
167
node_modules/ioredis/built/autoPipelining.js
generated
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.executeWithAutoPipelining = exports.getFirstValueInFlattenedArray = exports.shouldUseAutoPipelining = exports.notAllowedAutoPipelineCommands = exports.kCallbacks = exports.kExec = void 0;
|
||||
const lodash_1 = require("./utils/lodash");
|
||||
const calculateSlot = require("cluster-key-slot");
|
||||
const standard_as_callback_1 = require("standard-as-callback");
|
||||
const commands_1 = require("@ioredis/commands");
|
||||
exports.kExec = Symbol("exec");
|
||||
exports.kCallbacks = Symbol("callbacks");
|
||||
exports.notAllowedAutoPipelineCommands = [
|
||||
"auth",
|
||||
"info",
|
||||
"script",
|
||||
"quit",
|
||||
"cluster",
|
||||
"pipeline",
|
||||
"multi",
|
||||
"subscribe",
|
||||
"psubscribe",
|
||||
"unsubscribe",
|
||||
"unpsubscribe",
|
||||
"select",
|
||||
"client",
|
||||
];
|
||||
function executeAutoPipeline(client, slotKey) {
|
||||
/*
|
||||
If a pipeline is already executing, keep queueing up commands
|
||||
since ioredis won't serve two pipelines at the same time
|
||||
*/
|
||||
if (client._runningAutoPipelines.has(slotKey)) {
|
||||
return;
|
||||
}
|
||||
if (!client._autoPipelines.has(slotKey)) {
|
||||
/*
|
||||
Rare edge case. Somehow, something has deleted this running autopipeline in an immediate
|
||||
call to executeAutoPipeline.
|
||||
|
||||
Maybe the callback in the pipeline.exec is sometimes called in the same tick,
|
||||
e.g. if redis is disconnected?
|
||||
*/
|
||||
return;
|
||||
}
|
||||
client._runningAutoPipelines.add(slotKey);
|
||||
// Get the pipeline and immediately delete it so that new commands are queued on a new pipeline
|
||||
const pipeline = client._autoPipelines.get(slotKey);
|
||||
client._autoPipelines.delete(slotKey);
|
||||
const callbacks = pipeline[exports.kCallbacks];
|
||||
// Stop keeping a reference to callbacks immediately after the callbacks stop being used.
|
||||
// This allows the GC to reclaim objects referenced by callbacks, especially with 16384 slots
|
||||
// in Redis.Cluster
|
||||
pipeline[exports.kCallbacks] = null;
|
||||
// Perform the call
|
||||
pipeline.exec(function (err, results) {
|
||||
client._runningAutoPipelines.delete(slotKey);
|
||||
/*
|
||||
Invoke all callback in nextTick so the stack is cleared
|
||||
and callbacks can throw errors without affecting other callbacks.
|
||||
*/
|
||||
if (err) {
|
||||
for (let i = 0; i < callbacks.length; i++) {
|
||||
process.nextTick(callbacks[i], err);
|
||||
}
|
||||
}
|
||||
else {
|
||||
for (let i = 0; i < callbacks.length; i++) {
|
||||
process.nextTick(callbacks[i], ...results[i]);
|
||||
}
|
||||
}
|
||||
// If there is another pipeline on the same node, immediately execute it without waiting for nextTick
|
||||
if (client._autoPipelines.has(slotKey)) {
|
||||
executeAutoPipeline(client, slotKey);
|
||||
}
|
||||
});
|
||||
}
|
||||
function shouldUseAutoPipelining(client, functionName, commandName) {
|
||||
return (functionName &&
|
||||
client.options.enableAutoPipelining &&
|
||||
!client.isPipeline &&
|
||||
!exports.notAllowedAutoPipelineCommands.includes(commandName) &&
|
||||
!client.options.autoPipeliningIgnoredCommands.includes(commandName));
|
||||
}
|
||||
exports.shouldUseAutoPipelining = shouldUseAutoPipelining;
|
||||
function getFirstValueInFlattenedArray(args) {
|
||||
for (let i = 0; i < args.length; i++) {
|
||||
const arg = args[i];
|
||||
if (typeof arg === "string") {
|
||||
return arg;
|
||||
}
|
||||
else if (Array.isArray(arg) || (0, lodash_1.isArguments)(arg)) {
|
||||
if (arg.length === 0) {
|
||||
continue;
|
||||
}
|
||||
return arg[0];
|
||||
}
|
||||
const flattened = [arg].flat();
|
||||
if (flattened.length > 0) {
|
||||
return flattened[0];
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
exports.getFirstValueInFlattenedArray = getFirstValueInFlattenedArray;
|
||||
function executeWithAutoPipelining(client, functionName, commandName, args, callback) {
|
||||
// On cluster mode let's wait for slots to be available
|
||||
if (client.isCluster && !client.slots.length) {
|
||||
if (client.status === "wait")
|
||||
client.connect().catch(lodash_1.noop);
|
||||
return (0, standard_as_callback_1.default)(new Promise(function (resolve, reject) {
|
||||
client.delayUntilReady((err) => {
|
||||
if (err) {
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
executeWithAutoPipelining(client, functionName, commandName, args, null).then(resolve, reject);
|
||||
});
|
||||
}), callback);
|
||||
}
|
||||
// If we have slot information, we can improve routing by grouping slots served by the same subset of nodes
|
||||
// Note that the first value in args may be a (possibly empty) array.
|
||||
// ioredis will only flatten one level of the array, in the Command constructor.
|
||||
const prefix = client.options.keyPrefix || "";
|
||||
let slotKey = client.isCluster
|
||||
? client.slots[calculateSlot(`${prefix}${getFirstValueInFlattenedArray(args)}`)].join(",")
|
||||
: "main";
|
||||
// When scaleReads is enabled, separate read and write commands into different pipelines
|
||||
// so they can be routed to replicas and masters respectively
|
||||
if (client.isCluster && client.options.scaleReads !== "master") {
|
||||
const isReadOnly = (0, commands_1.exists)(commandName) && (0, commands_1.hasFlag)(commandName, "readonly");
|
||||
slotKey += isReadOnly ? ":read" : ":write";
|
||||
}
|
||||
if (!client._autoPipelines.has(slotKey)) {
|
||||
const pipeline = client.pipeline();
|
||||
pipeline[exports.kExec] = false;
|
||||
pipeline[exports.kCallbacks] = [];
|
||||
client._autoPipelines.set(slotKey, pipeline);
|
||||
}
|
||||
const pipeline = client._autoPipelines.get(slotKey);
|
||||
/*
|
||||
Mark the pipeline as scheduled.
|
||||
The symbol will make sure that the pipeline is only scheduled once per tick.
|
||||
New commands are appended to an already scheduled pipeline.
|
||||
*/
|
||||
if (!pipeline[exports.kExec]) {
|
||||
pipeline[exports.kExec] = true;
|
||||
/*
|
||||
Deferring with setImmediate so we have a chance to capture multiple
|
||||
commands that can be scheduled by I/O events already in the event loop queue.
|
||||
*/
|
||||
setImmediate(executeAutoPipeline, client, slotKey);
|
||||
}
|
||||
// Create the promise which will execute the command in the pipeline.
|
||||
const autoPipelinePromise = new Promise(function (resolve, reject) {
|
||||
pipeline[exports.kCallbacks].push(function (err, value) {
|
||||
if (err) {
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
resolve(value);
|
||||
});
|
||||
if (functionName === "call") {
|
||||
args.unshift(commandName);
|
||||
}
|
||||
pipeline[functionName](...args);
|
||||
});
|
||||
return (0, standard_as_callback_1.default)(autoPipelinePromise, callback);
|
||||
}
|
||||
exports.executeWithAutoPipelining = executeWithAutoPipelining;
|
||||
172
node_modules/ioredis/built/cluster/ClusterOptions.d.ts
generated
vendored
Normal file
172
node_modules/ioredis/built/cluster/ClusterOptions.d.ts
generated
vendored
Normal file
@@ -0,0 +1,172 @@
|
||||
/// <reference types="node" />
|
||||
import { SrvRecord } from "dns";
|
||||
import { RedisOptions } from "../redis/RedisOptions";
|
||||
import { CommanderOptions } from "../utils/Commander";
|
||||
import { NodeRole } from "./util";
|
||||
export declare type DNSResolveSrvFunction = (hostname: string, callback: (err: NodeJS.ErrnoException | null | undefined, records?: SrvRecord[]) => void) => void;
|
||||
export declare type DNSLookupFunction = (hostname: string, callback: (err: NodeJS.ErrnoException | null | undefined, address: string, family?: number) => void) => void;
|
||||
export declare type NatMapFunction = (key: string) => {
|
||||
host: string;
|
||||
port: number;
|
||||
} | null;
|
||||
export declare type NatMap = {
|
||||
[key: string]: {
|
||||
host: string;
|
||||
port: number;
|
||||
};
|
||||
} | NatMapFunction;
|
||||
/**
 * Options for Cluster constructor
 */
export interface ClusterOptions extends CommanderOptions {
    /**
     * See "Quick Start" section.
     *
     * @default (times) => Math.min(100 + times * 2, 2000)
     */
    clusterRetryStrategy?: ((times: number, reason?: Error) => number | void | null) | undefined;
    /**
     * See Redis class.
     *
     * @default true
     */
    enableOfflineQueue?: boolean | undefined;
    /**
     * When enabled, ioredis only emits "ready" event when `CLUSTER INFO`
     * command reporting the cluster is ready for handling commands.
     *
     * @default true
     */
    enableReadyCheck?: boolean | undefined;
    /**
     * Scale reads to the node with the specified role.
     *
     * @default "master"
     */
    scaleReads?: NodeRole | Function | undefined;
    /**
     * When a MOVED or ASK error is received, client will redirect the
     * command to another node.
     * This option limits the max redirections allowed to send a command.
     *
     * @default 16
     */
    maxRedirections?: number | undefined;
    /**
     * When an error is received when sending a command (e.g.
     * "Connection is closed." when the target Redis node is down), client will retry
     * if `retryDelayOnFailover` is valid delay time (in ms).
     *
     * @default 100
     */
    retryDelayOnFailover?: number | undefined;
    /**
     * When a CLUSTERDOWN error is received, client will retry
     * if `retryDelayOnClusterDown` is valid delay time (in ms).
     *
     * @default 100
     */
    retryDelayOnClusterDown?: number | undefined;
    /**
     * When a TRYAGAIN error is received, client will retry
     * if `retryDelayOnTryAgain` is valid delay time (in ms).
     *
     * @default 100
     */
    retryDelayOnTryAgain?: number | undefined;
    /**
     * By default, this value is 0, which means when a `MOVED` error is received,
     * the client will resend the command instantly to the node returned together with
     * the `MOVED` error. However, sometimes it takes time for a cluster to become
     * state stabilized after a failover, so adding a delay before resending can
     * prevent a ping pong effect.
     *
     * @default 0
     */
    retryDelayOnMoved?: number | undefined;
    /**
     * The milliseconds before a timeout occurs while refreshing
     * slots from the cluster.
     *
     * @default 1000
     */
    slotsRefreshTimeout?: number | undefined;
    /**
     * The milliseconds between every automatic slots refresh.
     *
     * NOTE(review): the runtime defaults object (ClusterOptions.js) does not
     * set this property, so the documented default may only apply when the
     * option is explicitly used — confirm against the Cluster implementation.
     *
     * @default 5000
     */
    slotsRefreshInterval?: number | undefined;
    /**
     * Use sharded subscribers instead of a single subscriber.
     *
     * If sharded subscribers are used, then one additional subscriber connection per master node
     * is established. If you don't plan to use SPUBLISH/SSUBSCRIBE, then this should be disabled.
     *
     * @default false
     */
    shardedSubscribers?: boolean | undefined;
    /**
     * Passed to the constructor of `Redis`
     *
     * @default null
     */
    redisOptions?: Omit<RedisOptions, "port" | "host" | "path" | "sentinels" | "retryStrategy" | "enableOfflineQueue" | "readOnly"> | undefined;
    /**
     * By default, When a new Cluster instance is created,
     * it will connect to the Redis cluster automatically.
     * If you want to keep the instance disconnected until the first command is called,
     * set this option to `true`.
     *
     * @default false
     */
    lazyConnect?: boolean | undefined;
    /**
     * Discover nodes using SRV records
     *
     * @default false
     */
    useSRVRecords?: boolean | undefined;
    /**
     * SRV records will be resolved via this function.
     *
     * You may provide a custom `resolveSrv` function when you want to customize
     * the cache behavior of the default function.
     *
     * @default require('dns').resolveSrv
     */
    resolveSrv?: DNSResolveSrvFunction | undefined;
    /**
     * Hostnames will be resolved to IP addresses via this function.
     * This is needed when the addresses of startup nodes are hostnames instead
     * of IPs.
     *
     * You may provide a custom `lookup` function when you want to customize
     * the cache behavior of the default function.
     *
     * @default require('dns').lookup
     */
    dnsLookup?: DNSLookupFunction | undefined;
    /**
     * Translate node addresses reported by the cluster into alternative
     * `{ host, port }` addresses, either via a static table or a mapping
     * function (see the `NatMap` type).
     */
    natMap?: NatMap | undefined;
    /**
     * See Redis class.
     *
     * @default false
     */
    enableAutoPipelining?: boolean | undefined;
    /**
     * See Redis class.
     *
     * @default []
     */
    autoPipeliningIgnoredCommands?: string[] | undefined;
    /**
     * Custom LUA commands
     */
    scripts?: Record<string, {
        lua: string;
        numberOfKeys?: number;
        readOnly?: boolean;
    }> | undefined;
}
/**
 * Built-in default values for ClusterOptions; the concrete values are
 * defined in ClusterOptions.js.
 */
export declare const DEFAULT_CLUSTER_OPTIONS: ClusterOptions;
|
||||
22
node_modules/ioredis/built/cluster/ClusterOptions.js
generated
vendored
Normal file
22
node_modules/ioredis/built/cluster/ClusterOptions.js
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.DEFAULT_CLUSTER_OPTIONS = void 0;
|
||||
const dns_1 = require("dns");
|
||||
exports.DEFAULT_CLUSTER_OPTIONS = {
|
||||
clusterRetryStrategy: (times) => Math.min(100 + times * 2, 2000),
|
||||
enableOfflineQueue: true,
|
||||
enableReadyCheck: true,
|
||||
scaleReads: "master",
|
||||
maxRedirections: 16,
|
||||
retryDelayOnMoved: 0,
|
||||
retryDelayOnFailover: 100,
|
||||
retryDelayOnClusterDown: 100,
|
||||
retryDelayOnTryAgain: 100,
|
||||
slotsRefreshTimeout: 1000,
|
||||
useSRVRecords: false,
|
||||
resolveSrv: dns_1.resolveSrv,
|
||||
dnsLookup: dns_1.lookup,
|
||||
enableAutoPipelining: false,
|
||||
autoPipeliningIgnoredCommands: [],
|
||||
shardedSubscribers: false,
|
||||
};
|
||||
29
node_modules/ioredis/built/cluster/ClusterSubscriber.d.ts
generated
vendored
Normal file
29
node_modules/ioredis/built/cluster/ClusterSubscriber.d.ts
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
/// <reference types="node" />
|
||||
import { EventEmitter } from "events";
|
||||
import ConnectionPool from "./ConnectionPool";
|
||||
export default class ClusterSubscriber {
    /** Pool of cluster node connections the subscriber node is sampled from. */
    private connectionPool;
    /** Emitter on which received (p|s)message events are re-emitted. */
    private emitter;
    /** True when this instance serves sharded Pub/Sub (SSUBSCRIBE/smessage). */
    private isSharded;
    /** Whether start() has been called without a subsequent stop(). */
    private started;
    /** Current dedicated subscriber connection, or null when none is selected. */
    private subscriber;
    /** Previous fully-subscribed connection; its channels are restored on failover. */
    private lastActiveSubscriber;
    /** Slots this subscriber is responsible for (only set in sharded mode). */
    private slotRange;
    constructor(connectionPool: ConnectionPool, emitter: EventEmitter, isSharded?: boolean);
    /** Returns the underlying subscriber connection, or null if none is selected. */
    getInstance(): any;
    /**
     * Associate this subscriber to a specific slot range.
     *
     * Returns the range or an empty array if the slot range couldn't be associated.
     *
     * BTW: This is more for debugging and testing purposes.
     *
     * @param range
     */
    associateSlotRange(range: number[]): number[];
    /** Activate the subscriber and select a node to connect to. */
    start(): void;
    /** Deactivate and disconnect the current subscriber connection, if any. */
    stop(): void;
    /** Whether start() has been called without a subsequent stop(). */
    isStarted(): boolean;
    /** "end"-event handler: while started, selects a replacement subscriber node. */
    private onSubscriberEnd;
    /** Samples a node, connects a fresh subscriber and restores prior subscriptions. */
    private selectSubscriber;
}
|
||||
223
node_modules/ioredis/built/cluster/ClusterSubscriber.js
generated
vendored
Normal file
223
node_modules/ioredis/built/cluster/ClusterSubscriber.js
generated
vendored
Normal file
@@ -0,0 +1,223 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const util_1 = require("./util");
|
||||
const utils_1 = require("../utils");
|
||||
const Redis_1 = require("../Redis");
|
||||
const debug = (0, utils_1.Debug)("cluster:subscriber");
|
||||
/**
 * Maintains the single dedicated Pub/Sub connection for a cluster.
 *
 * One node is sampled from the connection pool to carry subscription
 * traffic; when that connection ends or the node leaves the pool, another
 * node is selected and the previously subscribed channels are restored.
 * Incoming messages are re-emitted on the provided emitter.
 */
class ClusterSubscriber {
    /**
     * @param connectionPool pool the subscriber node is sampled from
     * @param emitter target for re-emitted message events
     * @param isSharded when true, serves sharded Pub/Sub (SSUBSCRIBE/smessage)
     */
    constructor(connectionPool, emitter, isSharded = false) {
        this.connectionPool = connectionPool;
        this.emitter = emitter;
        this.isSharded = isSharded;
        this.started = false;
        //There is only one connection for the entire pool
        this.subscriber = null;
        //The slot range for which this subscriber is responsible
        this.slotRange = [];
        this.onSubscriberEnd = () => {
            if (!this.started) {
                debug("subscriber has disconnected, but ClusterSubscriber is not started, so not reconnecting.");
                return;
            }
            // If the subscriber closes whilst it's still the active connection,
            // we might as well try to connecting to a new node if possible to
            // minimise the number of missed publishes.
            debug("subscriber has disconnected, selecting a new one...");
            this.selectSubscriber();
        };
        // If the current node we're using as the subscriber disappears
        // from the node pool for some reason, we will select a new one
        // to connect to.
        // Note that this event is only triggered if the connection to
        // the node has been used; cluster subscriptions are setup with
        // lazyConnect = true. It's possible for the subscriber node to
        // disappear without this method being called!
        // See https://github.com/luin/ioredis/pull/1589
        this.connectionPool.on("-node", (_, key) => {
            if (!this.started || !this.subscriber) {
                return;
            }
            if ((0, util_1.getNodeKey)(this.subscriber.options) === key) {
                debug("subscriber has left, selecting a new one...");
                this.selectSubscriber();
            }
        });
        this.connectionPool.on("+node", () => {
            if (!this.started || this.subscriber) {
                return;
            }
            debug("a new node is discovered and there is no subscriber, selecting a new one...");
            this.selectSubscriber();
        });
    }
    /**
     * Returns the underlying subscriber connection, or null when none is
     * currently selected.
     */
    getInstance() {
        return this.subscriber;
    }
    /**
     * Associate this subscriber to a specific slot range.
     *
     * Returns the range or an empty array if the slot range couldn't be associated.
     *
     * BTW: This is more for debugging and testing purposes.
     *
     * @param range
     */
    associateSlotRange(range) {
        if (this.isSharded) {
            this.slotRange = range;
        }
        return this.slotRange;
    }
    /** Activate the subscriber and pick a node to connect to. */
    start() {
        this.started = true;
        this.selectSubscriber();
        debug("started");
    }
    /** Deactivate and drop the current subscriber connection, if any. */
    stop() {
        this.started = false;
        if (this.subscriber) {
            this.subscriber.disconnect();
            this.subscriber = null;
        }
    }
    /** Whether start() has been called without a subsequent stop(). */
    isStarted() {
        return this.started;
    }
    /**
     * Pick a (possibly new) node from the pool, connect a fresh subscriber
     * to it, and restore the subscriptions held by the previous connection.
     */
    selectSubscriber() {
        const lastActiveSubscriber = this.lastActiveSubscriber;
        // Disconnect the previous subscriber even if there
        // will not be a new one.
        if (lastActiveSubscriber) {
            lastActiveSubscriber.off("end", this.onSubscriberEnd);
            lastActiveSubscriber.disconnect();
        }
        if (this.subscriber) {
            this.subscriber.off("end", this.onSubscriberEnd);
            this.subscriber.disconnect();
        }
        const sampleNode = (0, utils_1.sample)(this.connectionPool.getNodes());
        if (!sampleNode) {
            debug("selecting subscriber failed since there is no node discovered in the cluster yet");
            this.subscriber = null;
            return;
        }
        const { options } = sampleNode;
        debug("selected a subscriber %s:%s", options.host, options.port);
        /*
         * Create a specialized Redis connection for the subscription.
         * Note that auto reconnection is enabled here.
         *
         * `enableReadyCheck` is also enabled because although subscription is allowed
         * while redis is loading data from the disk, we can check if the password
         * provided for the subscriber is correct, and if not, the current subscriber
         * will be disconnected and a new subscriber will be selected.
         */
        let connectionPrefix = "subscriber";
        if (this.isSharded)
            connectionPrefix = "ssubscriber";
        this.subscriber = new Redis_1.default({
            port: options.port,
            host: options.host,
            username: options.username,
            password: options.password,
            enableReadyCheck: true,
            connectionName: (0, util_1.getConnectionName)(connectionPrefix, options.connectionName),
            lazyConnect: true,
            tls: options.tls,
            // Don't try to reconnect the subscriber connection. If the connection fails
            // we will get an end event (handled below), at which point we'll pick a new
            // node from the pool and try to connect to that as the subscriber connection.
            retryStrategy: null,
        });
        // Ignore the errors since they're handled in the connection pool.
        this.subscriber.on("error", utils_1.noop);
        this.subscriber.on("moved", () => {
            this.emitter.emit("forceRefresh");
        });
        // The node we lost connection to may not come back up in a
        // reasonable amount of time (e.g. a slave that's taken down
        // for maintainence), we could potentially miss many published
        // messages so we should reconnect as quickly as possible, to
        // a different node if needed.
        this.subscriber.once("end", this.onSubscriberEnd);
        // Re-subscribe previous channels
        const previousChannels = { subscribe: [], psubscribe: [], ssubscribe: [] };
        if (lastActiveSubscriber) {
            const condition = lastActiveSubscriber.condition || lastActiveSubscriber.prevCondition;
            if (condition && condition.subscriber) {
                previousChannels.subscribe = condition.subscriber.channels("subscribe");
                previousChannels.psubscribe =
                    condition.subscriber.channels("psubscribe");
                previousChannels.ssubscribe =
                    condition.subscriber.channels("ssubscribe");
            }
        }
        if (previousChannels.subscribe.length ||
            previousChannels.psubscribe.length ||
            previousChannels.ssubscribe.length) {
            // `lastActiveSubscriber` is only advanced after every re-subscription
            // settles, so a failover in between still sees the old channel lists.
            let pending = 0;
            for (const type of ["subscribe", "psubscribe", "ssubscribe"]) {
                const channels = previousChannels[type];
                if (channels.length == 0) {
                    continue;
                }
                debug("%s %d channels", type, channels.length);
                if (type === "ssubscribe") {
                    // Sharded subscriptions are issued one channel at a time.
                    for (const channel of channels) {
                        pending += 1;
                        this.subscriber[type](channel)
                            .then(() => {
                            if (!--pending) {
                                this.lastActiveSubscriber = this.subscriber;
                            }
                        })
                            .catch(() => {
                            // TODO: should probably disconnect the subscriber and try again.
                            debug("failed to ssubscribe to channel: %s", channel);
                        });
                    }
                }
                else {
                    pending += 1;
                    this.subscriber[type](channels)
                        .then(() => {
                        if (!--pending) {
                            this.lastActiveSubscriber = this.subscriber;
                        }
                    })
                        .catch(() => {
                        // TODO: should probably disconnect the subscriber and try again.
                        debug("failed to %s %d channels", type, channels.length);
                    });
                }
            }
        }
        else {
            this.lastActiveSubscriber = this.subscriber;
        }
        // Forward plain channel messages to the cluster-level emitter.
        for (const event of [
            "message",
            "messageBuffer",
        ]) {
            this.subscriber.on(event, (arg1, arg2) => {
                this.emitter.emit(event, arg1, arg2);
            });
        }
        // Pattern messages carry three arguments (pattern, channel, message).
        for (const event of ["pmessage", "pmessageBuffer"]) {
            this.subscriber.on(event, (arg1, arg2, arg3) => {
                this.emitter.emit(event, arg1, arg2, arg3);
            });
        }
        // Sharded messages are only forwarded when operating in sharded mode.
        if (this.isSharded == true) {
            for (const event of [
                "smessage",
                "smessageBuffer",
            ]) {
                this.subscriber.on(event, (arg1, arg2) => {
                    this.emitter.emit(event, arg1, arg2);
                });
            }
        }
    }
}
exports.default = ClusterSubscriber;
|
||||
108
node_modules/ioredis/built/cluster/ClusterSubscriberGroup.d.ts
generated
vendored
Normal file
108
node_modules/ioredis/built/cluster/ClusterSubscriberGroup.d.ts
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
/// <reference types="node" />
|
||||
import * as EventEmitter from "events";
|
||||
import ShardedSubscriber from "./ShardedSubscriber";
|
||||
import { ClusterOptions } from "./ClusterOptions";
|
||||
/**
|
||||
* Redis distinguishes between "normal" and sharded PubSub. When using the normal PubSub feature,
|
||||
* exactly one subscriber exists per cluster instance because the Redis cluster bus forwards
|
||||
* messages between shards. Sharded PubSub removes this limitation by making each shard
|
||||
* responsible for its own messages.
|
||||
*
|
||||
* This class coordinates one ShardedSubscriber per master node in the cluster, providing
|
||||
* sharded PubSub support while keeping the public API backward compatible.
|
||||
*/
|
||||
export default class ClusterSubscriberGroup {
    /** Emitter used to signal subscriber lifecycle events to the cluster. */
    private readonly subscriberGroupEmitter;
    /** Cluster options; `redisOptions` is forwarded to each ShardedSubscriber. */
    private readonly options;
    /** Active ShardedSubscriber keyed by node key. */
    private shardedSubscribers;
    /** Cached cluster slot map (slot index -> node keys) from the last refresh. */
    private clusterSlots;
    /** Node key -> list of slots owned by that node. */
    private subscriberToSlotsIndex;
    /** Slot -> channels subscribed on that slot; kept for resubscription. */
    private channels;
    /** Node key -> consecutive failed connection attempts (drives backoff). */
    private failedAttemptsByNode;
    /** True while a reset() is in flight; concurrent resets are queued. */
    private isResetting;
    /** Most recent reset request queued while a reset was already running. */
    private pendingReset;
    /** Cap on the attempt count used in the backoff exponent. */
    private static readonly MAX_RETRY_ATTEMPTS;
    /** Upper bound for the computed backoff delay (ms). */
    private static readonly MAX_BACKOFF_MS;
    /** Base backoff delay (ms), doubled per failed attempt. */
    private static readonly BASE_BACKOFF_MS;
    /**
     * Register callbacks
     *
     * @param cluster
     */
    constructor(subscriberGroupEmitter: EventEmitter, options: ClusterOptions);
    /**
     * Get the responsible subscriber.
     *
     * @param slot
     */
    getResponsibleSubscriber(slot: number): ShardedSubscriber | undefined;
    /**
     * Adds a channel for which this subscriber group is responsible
     *
     * @param channels
     */
    addChannels(channels: (string | Buffer)[]): number;
    /**
     * Removes channels for which the subscriber group is responsible by optionally unsubscribing
     * @param channels
     */
    removeChannels(channels: (string | Buffer)[]): number;
    /**
     * Disconnect all subscribers and clear some of the internal state.
     */
    stop(): void;
    /**
     * Start all not yet started subscribers
     */
    start(): Promise<any[]>;
    /**
     * Resets the subscriber group by disconnecting all subscribers that are no longer needed and connecting new ones.
     */
    reset(clusterSlots: string[][], clusterNodes: any[]): Promise<void>;
    /**
     * Refreshes the subscriber-related slot ranges
     *
     * Returns false if no refresh was needed
     *
     * @param targetSlots
     */
    private _refreshSlots;
    /**
     * Resubscribes to the previous channels
     *
     * @private
     */
    private _resubscribe;
    /**
     * Deep equality of the cluster slots objects
     *
     * @param other
     * @private
     */
    private _slotsAreEqual;
    /**
     * Checks if any subscribers are in an unhealthy state.
     *
     * A subscriber is considered unhealthy if:
     * - It exists but is not started (failed/disconnected)
     * - It's missing entirely for a node that should have one
     *
     * @returns true if any subscribers need to be recreated
     */
    private hasUnhealthySubscribers;
    /**
     * Handles failed subscriber connections by emitting an event to refresh the slots cache
     * after a backoff period.
     *
     * @param error
     * @param nodeKey
     */
    private handleSubscriberConnectFailed;
    /**
     * Handles successful subscriber connections by resetting the failed attempts counter.
     *
     * @param nodeKey
     */
    private handleSubscriberConnectSucceeded;
    /**
     * Whether a subscriber should be started now: non-lazy subscribers always
     * start; lazy ones start only when they own a slot that has active
     * channel subscriptions.
     */
    private shouldStartSubscriber;
}
|
||||
373
node_modules/ioredis/built/cluster/ClusterSubscriberGroup.js
generated
vendored
Normal file
373
node_modules/ioredis/built/cluster/ClusterSubscriberGroup.js
generated
vendored
Normal file
@@ -0,0 +1,373 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const utils_1 = require("../utils");
|
||||
const util_1 = require("./util");
|
||||
const calculateSlot = require("cluster-key-slot");
|
||||
const ShardedSubscriber_1 = require("./ShardedSubscriber");
|
||||
const debug = (0, utils_1.Debug)("cluster:subscriberGroup");
|
||||
/**
|
||||
* Redis distinguishes between "normal" and sharded PubSub. When using the normal PubSub feature,
|
||||
* exactly one subscriber exists per cluster instance because the Redis cluster bus forwards
|
||||
* messages between shards. Sharded PubSub removes this limitation by making each shard
|
||||
* responsible for its own messages.
|
||||
*
|
||||
* This class coordinates one ShardedSubscriber per master node in the cluster, providing
|
||||
* sharded PubSub support while keeping the public API backward compatible.
|
||||
*/
|
||||
class ClusterSubscriberGroup {
|
||||
/**
|
||||
* Register callbacks
|
||||
*
|
||||
* @param cluster
|
||||
*/
|
||||
constructor(subscriberGroupEmitter, options) {
|
||||
this.subscriberGroupEmitter = subscriberGroupEmitter;
|
||||
this.options = options;
|
||||
this.shardedSubscribers = new Map();
|
||||
this.clusterSlots = [];
|
||||
// Simple [min, max] slot ranges aren't enough because you can migrate single slots
|
||||
this.subscriberToSlotsIndex = new Map();
|
||||
this.channels = new Map();
|
||||
this.failedAttemptsByNode = new Map();
|
||||
// Only latest pending reset kept; throttled by refreshSlotsCache's isRefreshing + backoff delay
|
||||
this.isResetting = false;
|
||||
this.pendingReset = null;
|
||||
/**
|
||||
* Handles failed subscriber connections by emitting an event to refresh the slots cache
|
||||
* after a backoff period.
|
||||
*
|
||||
* @param error
|
||||
* @param nodeKey
|
||||
*/
|
||||
this.handleSubscriberConnectFailed = (error, nodeKey) => {
|
||||
const currentAttempts = this.failedAttemptsByNode.get(nodeKey) || 0;
|
||||
const failedAttempts = currentAttempts + 1;
|
||||
this.failedAttemptsByNode.set(nodeKey, failedAttempts);
|
||||
const attempts = Math.min(failedAttempts, ClusterSubscriberGroup.MAX_RETRY_ATTEMPTS);
|
||||
const backoff = Math.min(ClusterSubscriberGroup.BASE_BACKOFF_MS * 2 ** attempts, ClusterSubscriberGroup.MAX_BACKOFF_MS);
|
||||
const jitter = Math.floor((Math.random() - 0.5) * (backoff * 0.5));
|
||||
const delay = Math.max(0, backoff + jitter);
|
||||
debug("Failed to connect subscriber for %s. Refreshing slots in %dms", nodeKey, delay);
|
||||
this.subscriberGroupEmitter.emit("subscriberConnectFailed", {
|
||||
delay,
|
||||
error,
|
||||
});
|
||||
};
|
||||
/**
|
||||
* Handles successful subscriber connections by resetting the failed attempts counter.
|
||||
*
|
||||
* @param nodeKey
|
||||
*/
|
||||
this.handleSubscriberConnectSucceeded = (nodeKey) => {
|
||||
this.failedAttemptsByNode.delete(nodeKey);
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Get the responsible subscriber.
|
||||
*
|
||||
* @param slot
|
||||
*/
|
||||
getResponsibleSubscriber(slot) {
|
||||
const nodeKey = this.clusterSlots[slot][0];
|
||||
const sub = this.shardedSubscribers.get(nodeKey);
|
||||
if (sub && sub.subscriberStatus === "idle") {
|
||||
sub
|
||||
.start()
|
||||
.then(() => {
|
||||
this.handleSubscriberConnectSucceeded(sub.getNodeKey());
|
||||
})
|
||||
.catch((err) => {
|
||||
this.handleSubscriberConnectFailed(err, sub.getNodeKey());
|
||||
});
|
||||
}
|
||||
return sub;
|
||||
}
|
||||
/**
|
||||
* Adds a channel for which this subscriber group is responsible
|
||||
*
|
||||
* @param channels
|
||||
*/
|
||||
addChannels(channels) {
|
||||
const slot = calculateSlot(channels[0]);
|
||||
// Check if the all channels belong to the same slot and otherwise reject the operation
|
||||
for (const c of channels) {
|
||||
if (calculateSlot(c) !== slot) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
const currChannels = this.channels.get(slot);
|
||||
if (!currChannels) {
|
||||
this.channels.set(slot, channels);
|
||||
}
|
||||
else {
|
||||
this.channels.set(slot, currChannels.concat(channels));
|
||||
}
|
||||
return Array.from(this.channels.values()).reduce((sum, array) => sum + array.length, 0);
|
||||
}
|
||||
/**
|
||||
* Removes channels for which the subscriber group is responsible by optionally unsubscribing
|
||||
* @param channels
|
||||
*/
|
||||
removeChannels(channels) {
|
||||
const slot = calculateSlot(channels[0]);
|
||||
// Check if the all channels belong to the same slot and otherwise reject the operation
|
||||
for (const c of channels) {
|
||||
if (calculateSlot(c) !== slot) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
const slotChannels = this.channels.get(slot);
|
||||
if (slotChannels) {
|
||||
const updatedChannels = slotChannels.filter((c) => !channels.includes(c));
|
||||
this.channels.set(slot, updatedChannels);
|
||||
}
|
||||
return Array.from(this.channels.values()).reduce((sum, array) => sum + array.length, 0);
|
||||
}
|
||||
/**
|
||||
* Disconnect all subscribers and clear some of the internal state.
|
||||
*/
|
||||
stop() {
|
||||
for (const s of this.shardedSubscribers.values()) {
|
||||
s.stop();
|
||||
}
|
||||
// Clear subscriber instances and pending operations.
|
||||
// Channels are preserved for resubscription on reconnect.
|
||||
this.pendingReset = null;
|
||||
this.shardedSubscribers.clear();
|
||||
this.subscriberToSlotsIndex.clear();
|
||||
}
|
||||
/**
|
||||
* Start all not yet started subscribers
|
||||
*/
|
||||
start() {
|
||||
const startPromises = [];
|
||||
for (const s of this.shardedSubscribers.values()) {
|
||||
if (this.shouldStartSubscriber(s)) {
|
||||
startPromises.push(s
|
||||
.start()
|
||||
.then(() => {
|
||||
this.handleSubscriberConnectSucceeded(s.getNodeKey());
|
||||
})
|
||||
.catch((err) => {
|
||||
this.handleSubscriberConnectFailed(err, s.getNodeKey());
|
||||
}));
|
||||
this.subscriberGroupEmitter.emit("+subscriber");
|
||||
}
|
||||
}
|
||||
return Promise.all(startPromises);
|
||||
}
|
||||
/**
|
||||
* Resets the subscriber group by disconnecting all subscribers that are no longer needed and connecting new ones.
|
||||
*/
|
||||
async reset(clusterSlots, clusterNodes) {
|
||||
if (this.isResetting) {
|
||||
this.pendingReset = { slots: clusterSlots, nodes: clusterNodes };
|
||||
return;
|
||||
}
|
||||
this.isResetting = true;
|
||||
try {
|
||||
const hasTopologyChanged = this._refreshSlots(clusterSlots);
|
||||
const hasFailedSubscribers = this.hasUnhealthySubscribers();
|
||||
if (!hasTopologyChanged && !hasFailedSubscribers) {
|
||||
debug("No topology change detected or failed subscribers. Skipping reset.");
|
||||
return;
|
||||
}
|
||||
// For each of the sharded subscribers
|
||||
for (const [nodeKey, shardedSubscriber] of this.shardedSubscribers) {
|
||||
if (
|
||||
// If the subscriber is still responsible for a slot range and is healthy then keep it
|
||||
this.subscriberToSlotsIndex.has(nodeKey) &&
|
||||
shardedSubscriber.isHealthy()) {
|
||||
debug("Skipping deleting subscriber for %s", nodeKey);
|
||||
continue;
|
||||
}
|
||||
debug("Removing subscriber for %s", nodeKey);
|
||||
// Otherwise stop the subscriber and remove it
|
||||
shardedSubscriber.stop();
|
||||
this.shardedSubscribers.delete(nodeKey);
|
||||
this.subscriberGroupEmitter.emit("-subscriber");
|
||||
}
|
||||
const startPromises = [];
|
||||
// For each node in slots cache
|
||||
for (const [nodeKey, _] of this.subscriberToSlotsIndex) {
|
||||
const existingSubscriber = this.shardedSubscribers.get(nodeKey);
|
||||
// If we already have a subscriber for this node, only ensure it is healthy
|
||||
// when it now owns slots with active channel subscriptions.
|
||||
if (existingSubscriber && existingSubscriber.isHealthy()) {
|
||||
debug("Skipping creating new subscriber for %s", nodeKey);
|
||||
if (!existingSubscriber.isStarted() &&
|
||||
this.shouldStartSubscriber(existingSubscriber)) {
|
||||
startPromises.push(existingSubscriber
|
||||
.start()
|
||||
.then(() => {
|
||||
this.handleSubscriberConnectSucceeded(nodeKey);
|
||||
})
|
||||
.catch((error) => {
|
||||
this.handleSubscriberConnectFailed(error, nodeKey);
|
||||
}));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// If we have an existing subscriber but it is not healthy, stop it
|
||||
if (existingSubscriber && !existingSubscriber.isHealthy()) {
|
||||
debug("Replacing subscriber for %s", nodeKey);
|
||||
existingSubscriber.stop();
|
||||
this.shardedSubscribers.delete(nodeKey);
|
||||
this.subscriberGroupEmitter.emit("-subscriber");
|
||||
}
|
||||
debug("Creating new subscriber for %s", nodeKey);
|
||||
// Otherwise create a new subscriber
|
||||
const redis = clusterNodes.find((node) => {
|
||||
return (0, util_1.getNodeKey)(node.options) === nodeKey;
|
||||
});
|
||||
if (!redis) {
|
||||
debug("Failed to find node for key %s", nodeKey);
|
||||
continue;
|
||||
}
|
||||
const sub = new ShardedSubscriber_1.default(this.subscriberGroupEmitter, redis.options, this.options.redisOptions);
|
||||
this.shardedSubscribers.set(nodeKey, sub);
|
||||
if (this.shouldStartSubscriber(sub)) {
|
||||
startPromises.push(sub
|
||||
.start()
|
||||
.then(() => {
|
||||
this.handleSubscriberConnectSucceeded(nodeKey);
|
||||
})
|
||||
.catch((error) => {
|
||||
this.handleSubscriberConnectFailed(error, nodeKey);
|
||||
}));
|
||||
}
|
||||
this.subscriberGroupEmitter.emit("+subscriber");
|
||||
}
|
||||
// It's vital to await the start promises before resubscribing
|
||||
// Otherwise we might try to resubscribe to a subscriber that is not yet connected
|
||||
// This can cause a race condition
|
||||
await Promise.all(startPromises);
|
||||
this._resubscribe();
|
||||
this.subscriberGroupEmitter.emit("subscribersReady");
|
||||
}
|
||||
finally {
|
||||
this.isResetting = false;
|
||||
if (this.pendingReset) {
|
||||
const { slots, nodes } = this.pendingReset;
|
||||
this.pendingReset = null;
|
||||
await this.reset(slots, nodes);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Refreshes the subscriber-related slot ranges
|
||||
*
|
||||
* Returns false if no refresh was needed
|
||||
*
|
||||
* @param targetSlots
|
||||
*/
|
||||
_refreshSlots(targetSlots) {
|
||||
//If there was an actual change, then reassign the slot ranges
|
||||
// Also rebuild if subscriberToSlotsIndex is empty (e.g., after stop() was called)
|
||||
if (this._slotsAreEqual(targetSlots) && this.subscriberToSlotsIndex.size > 0) {
|
||||
debug("Nothing to refresh because the new cluster map is equal to the previous one.");
|
||||
return false;
|
||||
}
|
||||
debug("Refreshing the slots of the subscriber group.");
|
||||
//Rebuild the slots index
|
||||
this.subscriberToSlotsIndex = new Map();
|
||||
for (let slot = 0; slot < targetSlots.length; slot++) {
|
||||
const node = targetSlots[slot][0];
|
||||
if (!this.subscriberToSlotsIndex.has(node)) {
|
||||
this.subscriberToSlotsIndex.set(node, []);
|
||||
}
|
||||
this.subscriberToSlotsIndex.get(node).push(Number(slot));
|
||||
}
|
||||
//Update the cached slots map
|
||||
this.clusterSlots = JSON.parse(JSON.stringify(targetSlots));
|
||||
return true;
|
||||
}
|
||||
/**
 * Resubscribes each sharded subscriber to the channels previously
 * registered on the slots that its node now owns.
 *
 * For every subscriber, looks up the node's slots, then the channels
 * recorded for each slot, and re-issues SSUBSCRIBE on the subscriber's
 * underlying connection — immediately when the connection is "ready",
 * otherwise once it becomes ready. Connections that are gone ("end" or
 * null instance) are skipped.
 *
 * @private
 */
_resubscribe() {
    if (this.shardedSubscribers) {
        this.shardedSubscribers.forEach((s, nodeKey) => {
            const subscriberSlots = this.subscriberToSlotsIndex.get(nodeKey);
            if (subscriberSlots) {
                //Resubscribe on the underlying connection
                subscriberSlots.forEach((ss) => {
                    //Might return null if being disconnected
                    const redis = s.getInstance();
                    const channels = this.channels.get(ss);
                    if (channels && channels.length > 0) {
                        // Connection is gone for good — nothing to resubscribe on.
                        if (!redis || redis.status === "end") {
                            return;
                        }
                        if (redis.status === "ready") {
                            redis.ssubscribe(...channels).catch((err) => {
                                // TODO: Should we emit an error event here?
                                debug("Failed to ssubscribe on node %s: %s", nodeKey, err);
                            });
                        }
                        else {
                            // Not ready yet: defer the SSUBSCRIBE until the
                            // connection reports "ready".
                            redis.once("ready", () => {
                                redis.ssubscribe(...channels).catch((err) => {
                                    // TODO: Should we emit an error event here?
                                    debug("Failed to ssubscribe on node %s: %s", nodeKey, err);
                                });
                            });
                        }
                    }
                });
            }
        });
    }
}
|
||||
/**
|
||||
* Deep equality of the cluster slots objects
|
||||
*
|
||||
* @param other
|
||||
* @private
|
||||
*/
|
||||
_slotsAreEqual(other) {
|
||||
if (this.clusterSlots === undefined) {
|
||||
return false;
|
||||
}
|
||||
else {
|
||||
return JSON.stringify(this.clusterSlots) === JSON.stringify(other);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Checks if any subscribers are in an unhealthy state.
|
||||
*
|
||||
* A subscriber is considered unhealthy if:
|
||||
* - It exists but is not started (failed/disconnected)
|
||||
* - It's missing entirely for a node that should have one
|
||||
*
|
||||
* @returns true if any subscribers need to be recreated
|
||||
*/
|
||||
hasUnhealthySubscribers() {
|
||||
const hasFailedSubscribers = Array.from(this.shardedSubscribers.values()).some((sub) => !sub.isHealthy());
|
||||
const hasMissingSubscribers = Array.from(this.subscriberToSlotsIndex.keys()).some((nodeKey) => !this.shardedSubscribers.has(nodeKey));
|
||||
return hasFailedSubscribers || hasMissingSubscribers;
|
||||
}
|
||||
shouldStartSubscriber(sub) {
|
||||
if (sub.isStarted()) {
|
||||
return false;
|
||||
}
|
||||
if (!sub.isLazyConnect()) {
|
||||
return true;
|
||||
}
|
||||
const subscriberSlots = this.subscriberToSlotsIndex.get(sub.getNodeKey());
|
||||
if (!subscriberSlots) {
|
||||
return false;
|
||||
}
|
||||
return subscriberSlots.some((slot) => {
|
||||
const channels = this.channels.get(slot);
|
||||
return Boolean(channels && channels.length > 0);
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.default = ClusterSubscriberGroup;
|
||||
// Retry strategy
|
||||
ClusterSubscriberGroup.MAX_RETRY_ATTEMPTS = 10;
|
||||
ClusterSubscriberGroup.MAX_BACKOFF_MS = 2000;
|
||||
ClusterSubscriberGroup.BASE_BACKOFF_MS = 100;
|
||||
37
node_modules/ioredis/built/cluster/ConnectionPool.d.ts
generated
vendored
Normal file
37
node_modules/ioredis/built/cluster/ConnectionPool.d.ts
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
/// <reference types="node" />
|
||||
import { EventEmitter } from "events";
|
||||
import { RedisOptions, NodeKey, NodeRole } from "./util";
|
||||
import Redis from "../Redis";
|
||||
export default class ConnectionPool extends EventEmitter {
|
||||
private redisOptions;
|
||||
private nodes;
|
||||
private specifiedOptions;
|
||||
constructor(redisOptions: any);
|
||||
getNodes(role?: NodeRole): Redis[];
|
||||
getInstanceByKey(key: NodeKey): Redis;
|
||||
getSampleInstance(role: NodeRole): Redis;
|
||||
/**
|
||||
* Add a master node to the pool
|
||||
* @param node
|
||||
*/
|
||||
addMasterNode(node: RedisOptions): boolean;
|
||||
/**
|
||||
* Creates a Redis connection instance from the node options
|
||||
* @param node
|
||||
* @param readOnly
|
||||
*/
|
||||
createRedisFromOptions(node: RedisOptions, readOnly: boolean): Redis;
|
||||
/**
|
||||
* Find or create a connection to the node
|
||||
*/
|
||||
findOrCreate(node: RedisOptions, readOnly?: boolean): Redis;
|
||||
/**
|
||||
* Reset the pool with a set of nodes.
|
||||
* The old node will be removed.
|
||||
*/
|
||||
reset(nodes: RedisOptions[]): void;
|
||||
/**
|
||||
* Remove a node from the pool.
|
||||
*/
|
||||
private removeNode;
|
||||
}
|
||||
154
node_modules/ioredis/built/cluster/ConnectionPool.js
generated
vendored
Normal file
154
node_modules/ioredis/built/cluster/ConnectionPool.js
generated
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const events_1 = require("events");
|
||||
const utils_1 = require("../utils");
|
||||
const util_1 = require("./util");
|
||||
const Redis_1 = require("../Redis");
|
||||
const debug = (0, utils_1.Debug)("cluster:connectionPool");
|
||||
class ConnectionPool extends events_1.EventEmitter {
    /**
     * Pool of Redis connections to the nodes of a cluster.
     *
     * Maintains three views over the same instances, keyed by "host:port"
     * node keys: `all`, `master` and `slave` (master + slave = all).
     *
     * @param redisOptions options merged into every node connection
     */
    constructor(redisOptions) {
        super();
        this.redisOptions = redisOptions;
        // master + slave = all
        this.nodes = {
            all: {},
            master: {},
            slave: {},
        };
        // Options explicitly supplied for a node key; re-applied when the
        // same node key is encountered again in findOrCreate().
        this.specifiedOptions = {};
    }
    /**
     * Get all connections of the given role ("all" | "master" | "slave").
     */
    getNodes(role = "all") {
        const nodes = this.nodes[role];
        return Object.keys(nodes).map((key) => nodes[key]);
    }
    /**
     * Look up a connection by its node key (undefined when absent).
     */
    getInstanceByKey(key) {
        return this.nodes.all[key];
    }
    /**
     * Pick a random connection of the given role
     * (undefined when the role has no nodes).
     */
    getSampleInstance(role) {
        const keys = Object.keys(this.nodes[role]);
        const sampleKey = (0, utils_1.sample)(keys);
        return this.nodes[role][sampleKey];
    }
    /**
     * Add a master node to the pool
     * @param node
     * @returns true when the node was registered as a master,
     * false when it was read-only and therefore skipped
     */
    addMasterNode(node) {
        //Master nodes aren't read-only — bail out before creating a
        //throwaway Redis instance for a node we will not register.
        if (node.options.readOnly) {
            return false;
        }
        const key = (0, util_1.getNodeKey)(node.options);
        const redis = this.createRedisFromOptions(node, node.options.readOnly);
        this.nodes.all[key] = redis;
        this.nodes.master[key] = redis;
        return true;
    }
    /**
     * Creates a Redis connection instance from the node options
     * @param node
     * @param readOnly
     */
    createRedisFromOptions(node, readOnly) {
        const redis = new Redis_1.default((0, utils_1.defaults)({
            // Never try to reconnect when a node is lost,
            // instead, wait for a `MOVED` error and
            // fetch the slots again.
            retryStrategy: null,
            // Offline queue should be enabled so that
            // we don't need to wait for the `ready` event
            // before sending commands to the node.
            enableOfflineQueue: true,
            readOnly: readOnly,
        }, node, this.redisOptions, { lazyConnect: true }));
        return redis;
    }
    /**
     * Find or create a connection to the node
     */
    findOrCreate(node, readOnly = false) {
        const key = (0, util_1.getNodeKey)(node);
        readOnly = Boolean(readOnly);
        if (this.specifiedOptions[key]) {
            Object.assign(node, this.specifiedOptions[key]);
        }
        else {
            this.specifiedOptions[key] = node;
        }
        let redis;
        if (this.nodes.all[key]) {
            // Reuse the existing connection; flip its role if needed.
            redis = this.nodes.all[key];
            if (redis.options.readOnly !== readOnly) {
                redis.options.readOnly = readOnly;
                debug("Change role of %s to %s", key, readOnly ? "slave" : "master");
                redis[readOnly ? "readonly" : "readwrite"]().catch(utils_1.noop);
                if (readOnly) {
                    delete this.nodes.master[key];
                    this.nodes.slave[key] = redis;
                }
                else {
                    delete this.nodes.slave[key];
                    this.nodes.master[key] = redis;
                }
            }
        }
        else {
            debug("Connecting to %s as %s", key, readOnly ? "slave" : "master");
            redis = this.createRedisFromOptions(node, readOnly);
            this.nodes.all[key] = redis;
            this.nodes[readOnly ? "slave" : "master"][key] = redis;
            redis.once("end", () => {
                this.removeNode(key);
                this.emit("-node", redis, key);
                if (!Object.keys(this.nodes.all).length) {
                    this.emit("drain");
                }
            });
            this.emit("+node", redis, key);
            // Must be an arrow function: EventEmitter invokes plain-function
            // listeners with the emitting *node* as `this`, which would emit
            // "nodeError" on the node instead of the pool and the pool's
            // "nodeError" listeners would never fire.
            redis.on("error", (error) => {
                this.emit("nodeError", error, key);
            });
        }
        return redis;
    }
    /**
     * Reset the pool with a set of nodes.
     * The old node will be removed.
     */
    reset(nodes) {
        debug("Reset with %O", nodes);
        const newNodes = {};
        nodes.forEach((node) => {
            const key = (0, util_1.getNodeKey)(node);
            // Don't override the existing (master) node
            // when the current one is slave.
            if (!(node.readOnly && newNodes[key])) {
                newNodes[key] = node;
            }
        });
        // Drop connections to nodes that no longer hold any slot.
        Object.keys(this.nodes.all).forEach((key) => {
            if (!newNodes[key]) {
                debug("Disconnect %s because the node does not hold any slot", key);
                this.nodes.all[key].disconnect();
                this.removeNode(key);
            }
        });
        Object.keys(newNodes).forEach((key) => {
            const node = newNodes[key];
            this.findOrCreate(node, node.readOnly);
        });
    }
    /**
     * Remove a node from the pool.
     */
    removeNode(key) {
        const { nodes } = this;
        if (nodes.all[key]) {
            debug("Remove %s from the pool", key);
            delete nodes.all[key];
        }
        delete nodes.master[key];
        delete nodes.slave[key];
    }
}
|
||||
exports.default = ConnectionPool;
|
||||
20
node_modules/ioredis/built/cluster/DelayQueue.d.ts
generated
vendored
Normal file
20
node_modules/ioredis/built/cluster/DelayQueue.d.ts
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
export interface DelayQueueOptions {
|
||||
callback?: Function;
|
||||
timeout: number;
|
||||
}
|
||||
/**
|
||||
* Queue that runs items after specified duration
|
||||
*/
|
||||
export default class DelayQueue {
|
||||
private queues;
|
||||
private timeouts;
|
||||
/**
|
||||
* Add a new item to the queue
|
||||
*
|
||||
* @param bucket bucket name
|
||||
* @param item function that will run later
|
||||
* @param options
|
||||
*/
|
||||
push(bucket: string, item: Function, options: DelayQueueOptions): void;
|
||||
private execute;
|
||||
}
|
||||
53
node_modules/ioredis/built/cluster/DelayQueue.js
generated
vendored
Normal file
53
node_modules/ioredis/built/cluster/DelayQueue.js
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const utils_1 = require("../utils");
|
||||
const Deque = require("denque");
|
||||
const debug = (0, utils_1.Debug)("delayqueue");
|
||||
/**
 * Queue that runs items after a specified duration.
 */
class DelayQueue {
    constructor() {
        // bucket name -> Deque of pending callbacks
        this.queues = {};
        // bucket name -> active timer handle (null when idle)
        this.timeouts = {};
    }
    /**
     * Add a new item to the queue
     *
     * @param bucket bucket name
     * @param item function that will run later
     * @param options timeout in ms plus an optional scheduling callback
     */
    push(bucket, item, options) {
        const schedule = options.callback || process.nextTick;
        let queue = this.queues[bucket];
        if (!queue) {
            queue = new Deque();
            this.queues[bucket] = queue;
        }
        queue.push(item);
        // Only the first push for a bucket arms the timer; items pushed
        // before it fires are drained in the same batch.
        if (!this.timeouts[bucket]) {
            this.timeouts[bucket] = setTimeout(() => {
                schedule(() => {
                    this.timeouts[bucket] = null;
                    this.execute(bucket);
                });
            }, options.timeout);
        }
    }
    // Drain and invoke every item currently queued for the bucket.
    execute(bucket) {
        const queue = this.queues[bucket];
        if (!queue || !queue.length) {
            return;
        }
        debug("send %d commands in %s queue", queue.length, bucket);
        // Detach the deque first so callbacks that push again start a
        // fresh batch instead of extending this one.
        this.queues[bucket] = null;
        while (queue.length > 0) {
            queue.shift()();
        }
    }
}
|
||||
exports.default = DelayQueue;
|
||||
36
node_modules/ioredis/built/cluster/ShardedSubscriber.d.ts
generated
vendored
Normal file
36
node_modules/ioredis/built/cluster/ShardedSubscriber.d.ts
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
/// <reference types="node" />
|
||||
import EventEmitter = require("events");
|
||||
import { RedisOptions } from "./util";
|
||||
import Redis from "../Redis";
|
||||
import { ClusterOptions } from "./ClusterOptions";
|
||||
declare const SubscriberStatus: {
|
||||
readonly IDLE: "idle";
|
||||
readonly STARTING: "starting";
|
||||
readonly CONNECTED: "connected";
|
||||
readonly STOPPING: "stopping";
|
||||
readonly ENDED: "ended";
|
||||
};
|
||||
declare type SubscriberStatus = typeof SubscriberStatus[keyof typeof SubscriberStatus];
|
||||
export default class ShardedSubscriber {
|
||||
private readonly emitter;
|
||||
private readonly nodeKey;
|
||||
private status;
|
||||
private instance;
|
||||
private connectPromise;
|
||||
private lazyConnect;
|
||||
private readonly messageListeners;
|
||||
constructor(emitter: EventEmitter, options: RedisOptions, redisOptions?: ClusterOptions["redisOptions"]);
|
||||
start(): Promise<void>;
|
||||
stop(): void;
|
||||
isStarted(): boolean;
|
||||
get subscriberStatus(): SubscriberStatus;
|
||||
isHealthy(): boolean;
|
||||
getInstance(): Redis | null;
|
||||
getNodeKey(): string;
|
||||
isLazyConnect(): boolean;
|
||||
private onEnd;
|
||||
private onError;
|
||||
private onMoved;
|
||||
private updateStatus;
|
||||
}
|
||||
export {};
|
||||
147
node_modules/ioredis/built/cluster/ShardedSubscriber.js
generated
vendored
Normal file
147
node_modules/ioredis/built/cluster/ShardedSubscriber.js
generated
vendored
Normal file
@@ -0,0 +1,147 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const util_1 = require("./util");
|
||||
const utils_1 = require("../utils");
|
||||
const Redis_1 = require("../Redis");
|
||||
const debug = (0, utils_1.Debug)("cluster:subscriberGroup:shardedSubscriber");
|
||||
// Lifecycle states of a sharded subscriber connection.
const SubscriberStatus = {
    IDLE: "idle",
    STARTING: "starting",
    CONNECTED: "connected",
    STOPPING: "stopping",
    ENDED: "ended",
};
// Legal state transitions; updateStatus() rejects anything not listed
// here (ENDED is terminal — it has no outgoing transitions).
const ALLOWED_STATUS_UPDATES = {
    [SubscriberStatus.IDLE]: [
        SubscriberStatus.STARTING,
        SubscriberStatus.STOPPING,
        SubscriberStatus.ENDED,
    ],
    [SubscriberStatus.STARTING]: [
        SubscriberStatus.CONNECTED,
        SubscriberStatus.STOPPING,
        SubscriberStatus.ENDED,
    ],
    [SubscriberStatus.CONNECTED]: [
        SubscriberStatus.STOPPING,
        SubscriberStatus.ENDED,
    ],
    [SubscriberStatus.STOPPING]: [SubscriberStatus.ENDED],
    [SubscriberStatus.ENDED]: [],
};
/**
 * A single sharded Pub/Sub subscriber: one dedicated Redis connection to
 * one cluster node that relays "smessage"/"smessageBuffer" events (and
 * node lifecycle events) to the shared emitter.
 */
class ShardedSubscriber {
    /**
     * @param emitter shared EventEmitter that receives relayed events
     * @param options connection options for the target node
     * @param redisOptions additional per-connection options from the cluster config
     */
    constructor(emitter, options, redisOptions) {
        var _a;
        this.emitter = emitter;
        this.status = SubscriberStatus.IDLE;
        this.instance = null;
        this.connectPromise = null;
        // Store listener references for cleanup
        this.messageListeners = new Map();
        // Relay the connection's end to the group and mark this subscriber terminal.
        this.onEnd = () => {
            this.updateStatus(SubscriberStatus.ENDED);
            this.emitter.emit("-node", this.instance, this.nodeKey);
        };
        // Relay connection errors, tagged with this subscriber's node key.
        this.onError = (error) => {
            this.emitter.emit("nodeError", error, this.nodeKey);
        };
        // Relay MOVED redirections so the group can refresh its slot map.
        this.onMoved = () => {
            this.emitter.emit("moved");
        };
        this.instance = new Redis_1.default((0, utils_1.defaults)({
            enableReadyCheck: false,
            enableOfflineQueue: true,
            connectionName: (0, util_1.getConnectionName)("ssubscriber", options.connectionName),
            /**
             * Disable auto reconnection for subscribers.
             * The ClusterSubscriberGroup will handle the reconnection.
             */
            retryStrategy: null,
            lazyConnect: true,
        }, options, redisOptions));
        // Default to lazy connect unless the cluster options say otherwise.
        this.lazyConnect = (_a = redisOptions === null || redisOptions === void 0 ? void 0 : redisOptions.lazyConnect) !== null && _a !== void 0 ? _a : true;
        this.nodeKey = (0, util_1.getNodeKey)(options);
        // Register listeners
        this.instance.on("end", this.onEnd);
        this.instance.on("error", this.onError);
        this.instance.on("moved", this.onMoved);
        // Forward sharded Pub/Sub messages to the shared emitter, keeping
        // the listener references so they could be removed individually.
        for (const event of ["smessage", "smessageBuffer"]) {
            const listener = (...args) => {
                this.emitter.emit(event, ...args);
            };
            this.messageListeners.set(event, listener);
            this.instance.on(event, listener);
        }
    }
    /**
     * Connects the underlying Redis instance.
     *
     * Idempotent while a connect is in flight (returns the same promise)
     * or once connected. Throws when the subscriber has already ended —
     * an ended subscriber cannot be restarted.
     */
    async start() {
        if (this.connectPromise) {
            return this.connectPromise;
        }
        if (this.status === SubscriberStatus.STARTING ||
            this.status === SubscriberStatus.CONNECTED) {
            return;
        }
        if (this.status === SubscriberStatus.ENDED || !this.instance) {
            throw new Error(`Sharded subscriber ${this.nodeKey} cannot be restarted once ended.`);
        }
        this.updateStatus(SubscriberStatus.STARTING);
        this.connectPromise = this.instance.connect();
        try {
            await this.connectPromise;
            this.updateStatus(SubscriberStatus.CONNECTED);
        }
        catch (err) {
            // Failed connects are terminal for this subscriber.
            this.updateStatus(SubscriberStatus.ENDED);
            throw err;
        }
        finally {
            this.connectPromise = null;
        }
    }
    /**
     * Disconnects, removes all listeners and releases the Redis instance.
     * The subscriber ends up in the terminal ENDED state.
     */
    stop() {
        this.updateStatus(SubscriberStatus.STOPPING);
        if (this.instance) {
            this.instance.disconnect();
            this.instance.removeAllListeners();
            this.messageListeners.clear();
            this.instance = null;
        }
        this.updateStatus(SubscriberStatus.ENDED);
        debug("stopped %s", this.nodeKey);
    }
    // True while connecting or connected.
    isStarted() {
        return [
            SubscriberStatus.CONNECTED,
            SubscriberStatus.STARTING,
        ].includes(this.status);
    }
    // Current lifecycle state (read-only).
    get subscriberStatus() {
        return this.status;
    }
    // Healthy = not stopping/ended and the instance still exists.
    isHealthy() {
        return ((this.status === SubscriberStatus.IDLE ||
            this.status === SubscriberStatus.CONNECTED ||
            this.status === SubscriberStatus.STARTING) &&
            this.instance !== null);
    }
    // Underlying Redis connection, or null once stopped/being disconnected.
    getInstance() {
        return this.instance;
    }
    // "host:port" key of the node this subscriber is attached to.
    getNodeKey() {
        return this.nodeKey;
    }
    // Whether this subscriber connects lazily (see constructor default).
    isLazyConnect() {
        return this.lazyConnect;
    }
    // Applies a state transition; no-op for same-state or disallowed moves
    // (disallowed moves are only logged via debug, never thrown).
    updateStatus(nextStatus) {
        if (this.status === nextStatus) {
            return;
        }
        if (!ALLOWED_STATUS_UPDATES[this.status].includes(nextStatus)) {
            debug("Invalid status transition for %s: %s -> %s", this.nodeKey, this.status, nextStatus);
            return;
        }
        this.status = nextStatus;
    }
}
|
||||
exports.default = ShardedSubscriber;
|
||||
163
node_modules/ioredis/built/cluster/index.d.ts
generated
vendored
Normal file
163
node_modules/ioredis/built/cluster/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,163 @@
|
||||
/// <reference types="node" />
|
||||
import { EventEmitter } from "events";
|
||||
import Command from "../Command";
|
||||
import Redis from "../Redis";
|
||||
import ScanStream from "../ScanStream";
|
||||
import { Transaction } from "../transaction";
|
||||
import { Callback, ScanStreamOptions, WriteableStream } from "../types";
|
||||
import Commander from "../utils/Commander";
|
||||
import { ClusterOptions } from "./ClusterOptions";
|
||||
import { NodeKey, NodeRole } from "./util";
|
||||
export declare type ClusterNode = string | number | {
|
||||
host?: string | undefined;
|
||||
port?: number | undefined;
|
||||
};
|
||||
declare type ClusterStatus = "end" | "close" | "wait" | "connecting" | "connect" | "ready" | "reconnecting" | "disconnecting";
|
||||
/**
|
||||
* Client for the official Redis Cluster
|
||||
*/
|
||||
declare class Cluster extends Commander {
|
||||
options: ClusterOptions;
|
||||
slots: NodeKey[][];
|
||||
status: ClusterStatus;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
_groupsIds: {
|
||||
[key: string]: number;
|
||||
};
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
_groupsBySlot: number[];
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
isCluster: boolean;
|
||||
private startupNodes;
|
||||
private connectionPool;
|
||||
private manuallyClosing;
|
||||
private retryAttempts;
|
||||
private delayQueue;
|
||||
private offlineQueue;
|
||||
private subscriber;
|
||||
private shardedSubscribers;
|
||||
private slotsTimer;
|
||||
private reconnectTimeout;
|
||||
private isRefreshing;
|
||||
private _refreshSlotsCacheCallbacks;
|
||||
private _autoPipelines;
|
||||
private _runningAutoPipelines;
|
||||
private _readyDelayedCallbacks;
|
||||
private subscriberGroupEmitter;
|
||||
/**
|
||||
* Every time Cluster#connect() is called, this value will be
|
||||
* auto-incrementing. The purpose of this value is used for
|
||||
* discarding previous connect attampts when creating a new
|
||||
* connection.
|
||||
*/
|
||||
private connectionEpoch;
|
||||
/**
|
||||
* Creates an instance of Cluster.
|
||||
*/
|
||||
constructor(startupNodes: ClusterNode[], options?: ClusterOptions);
|
||||
/**
|
||||
* Connect to a cluster
|
||||
*/
|
||||
connect(): Promise<void>;
|
||||
/**
|
||||
* Disconnect from every node in the cluster.
|
||||
*/
|
||||
disconnect(reconnect?: boolean): void;
|
||||
/**
|
||||
* Quit the cluster gracefully.
|
||||
*/
|
||||
quit(callback?: Callback<"OK">): Promise<"OK">;
|
||||
/**
|
||||
* Create a new instance with the same startup nodes and options as the current one.
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* var cluster = new Redis.Cluster([{ host: "127.0.0.1", port: "30001" }]);
|
||||
* var anotherCluster = cluster.duplicate();
|
||||
* ```
|
||||
*/
|
||||
duplicate(overrideStartupNodes?: any[], overrideOptions?: {}): Cluster;
|
||||
/**
|
||||
* Get nodes with the specified role
|
||||
*/
|
||||
nodes(role?: NodeRole): Redis[];
|
||||
/**
|
||||
* This is needed in order not to install a listener for each auto pipeline
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
delayUntilReady(callback: Callback): void;
|
||||
/**
|
||||
* Get the number of commands queued in automatic pipelines.
|
||||
*
|
||||
* This is not available (and returns 0) until the cluster is connected and slots information have been received.
|
||||
*/
|
||||
get autoPipelineQueueSize(): number;
|
||||
/**
|
||||
* Refresh the slot cache
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
refreshSlotsCache(callback?: Callback<void>): void;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
sendCommand(command: Command, stream?: WriteableStream, node?: any): unknown;
|
||||
sscanStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
sscanBufferStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
hscanStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
hscanBufferStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
zscanStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
zscanBufferStream(key: string, options?: ScanStreamOptions): ScanStream;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
handleError(error: Error, ttl: {
|
||||
value?: any;
|
||||
}, handlers: any): void;
|
||||
private resetOfflineQueue;
|
||||
private clearNodesRefreshInterval;
|
||||
private resetNodesRefreshInterval;
|
||||
/**
|
||||
* Change cluster instance's status
|
||||
*/
|
||||
private setStatus;
|
||||
/**
|
||||
* Called when closed to check whether a reconnection should be made
|
||||
*/
|
||||
private handleCloseEvent;
|
||||
/**
|
||||
* Flush offline queue with error.
|
||||
*/
|
||||
private flushQueue;
|
||||
private executeOfflineCommands;
|
||||
private natMapper;
|
||||
private getInfoFromNode;
|
||||
private invokeReadyDelayedCallbacks;
|
||||
/**
|
||||
* Check whether Cluster is able to process commands
|
||||
*/
|
||||
private readyCheck;
|
||||
private resolveSrv;
|
||||
private dnsLookup;
|
||||
/**
|
||||
* Normalize startup nodes, and resolving hostnames to IPs.
|
||||
*
|
||||
* This process happens every time when #connect() is called since
|
||||
* #startupNodes and DNS records may chanage.
|
||||
*/
|
||||
private resolveStartupNodeHostnames;
|
||||
private createScanStream;
|
||||
private createShardedSubscriberGroup;
|
||||
}
|
||||
interface Cluster extends EventEmitter {
|
||||
}
|
||||
interface Cluster extends Transaction {
|
||||
}
|
||||
export default Cluster;
|
||||
937
node_modules/ioredis/built/cluster/index.js
generated
vendored
Normal file
937
node_modules/ioredis/built/cluster/index.js
generated
vendored
Normal file
@@ -0,0 +1,937 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const commands_1 = require("@ioredis/commands");
|
||||
const events_1 = require("events");
|
||||
const redis_errors_1 = require("redis-errors");
|
||||
const standard_as_callback_1 = require("standard-as-callback");
|
||||
const Command_1 = require("../Command");
|
||||
const ClusterAllFailedError_1 = require("../errors/ClusterAllFailedError");
|
||||
const Redis_1 = require("../Redis");
|
||||
const ScanStream_1 = require("../ScanStream");
|
||||
const transaction_1 = require("../transaction");
|
||||
const utils_1 = require("../utils");
|
||||
const applyMixin_1 = require("../utils/applyMixin");
|
||||
const Commander_1 = require("../utils/Commander");
|
||||
const ClusterOptions_1 = require("./ClusterOptions");
|
||||
const ClusterSubscriber_1 = require("./ClusterSubscriber");
|
||||
const ConnectionPool_1 = require("./ConnectionPool");
|
||||
const DelayQueue_1 = require("./DelayQueue");
|
||||
const util_1 = require("./util");
|
||||
const Deque = require("denque");
|
||||
const ClusterSubscriberGroup_1 = require("./ClusterSubscriberGroup");
|
||||
const debug = (0, utils_1.Debug)("cluster");
|
||||
const REJECT_OVERWRITTEN_COMMANDS = new WeakSet();
|
||||
/**
|
||||
* Client for the official Redis Cluster
|
||||
*/
|
||||
class Cluster extends Commander_1.default {
|
||||
/**
|
||||
* Creates an instance of Cluster.
|
||||
*/
|
||||
//TODO: Add an option that enables or disables sharded PubSub
|
||||
constructor(startupNodes, options = {}) {
|
||||
super();
|
||||
this.slots = [];
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
this._groupsIds = {};
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
this._groupsBySlot = Array(16384);
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
this.isCluster = true;
|
||||
this.retryAttempts = 0;
|
||||
this.delayQueue = new DelayQueue_1.default();
|
||||
this.offlineQueue = new Deque();
|
||||
this.isRefreshing = false;
|
||||
this._refreshSlotsCacheCallbacks = [];
|
||||
this._autoPipelines = new Map();
|
||||
this._runningAutoPipelines = new Set();
|
||||
this._readyDelayedCallbacks = [];
|
||||
/**
|
||||
* Every time Cluster#connect() is called, this value will be
|
||||
* auto-incrementing. The purpose of this value is used for
|
||||
* discarding previous connect attampts when creating a new
|
||||
* connection.
|
||||
*/
|
||||
this.connectionEpoch = 0;
|
||||
events_1.EventEmitter.call(this);
|
||||
this.startupNodes = startupNodes;
|
||||
this.options = (0, utils_1.defaults)({}, options, ClusterOptions_1.DEFAULT_CLUSTER_OPTIONS, this.options);
|
||||
if (this.options.shardedSubscribers) {
|
||||
this.createShardedSubscriberGroup();
|
||||
}
|
||||
if (this.options.redisOptions &&
|
||||
this.options.redisOptions.keyPrefix &&
|
||||
!this.options.keyPrefix) {
|
||||
this.options.keyPrefix = this.options.redisOptions.keyPrefix;
|
||||
}
|
||||
// validate options
|
||||
if (typeof this.options.scaleReads !== "function" &&
|
||||
["all", "master", "slave"].indexOf(this.options.scaleReads) === -1) {
|
||||
throw new Error('Invalid option scaleReads "' +
|
||||
this.options.scaleReads +
|
||||
'". Expected "all", "master", "slave" or a custom function');
|
||||
}
|
||||
this.connectionPool = new ConnectionPool_1.default(this.options.redisOptions);
|
||||
this.connectionPool.on("-node", (redis, key) => {
|
||||
this.emit("-node", redis);
|
||||
});
|
||||
this.connectionPool.on("+node", (redis) => {
|
||||
this.emit("+node", redis);
|
||||
});
|
||||
this.connectionPool.on("drain", () => {
|
||||
this.setStatus("close");
|
||||
});
|
||||
this.connectionPool.on("nodeError", (error, key) => {
|
||||
this.emit("node error", error, key);
|
||||
});
|
||||
this.subscriber = new ClusterSubscriber_1.default(this.connectionPool, this);
|
||||
if (this.options.scripts) {
|
||||
Object.entries(this.options.scripts).forEach(([name, definition]) => {
|
||||
this.defineCommand(name, definition);
|
||||
});
|
||||
}
|
||||
if (this.options.lazyConnect) {
|
||||
this.setStatus("wait");
|
||||
}
|
||||
else {
|
||||
this.connect().catch((err) => {
|
||||
debug("connecting failed: %s", err);
|
||||
});
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Connect to a cluster
|
||||
*/
|
||||
connect() {
|
||||
return new Promise((resolve, reject) => {
|
||||
if (this.status === "connecting" ||
|
||||
this.status === "connect" ||
|
||||
this.status === "ready") {
|
||||
reject(new Error("Redis is already connecting/connected"));
|
||||
return;
|
||||
}
|
||||
const epoch = ++this.connectionEpoch;
|
||||
this.setStatus("connecting");
|
||||
this.resolveStartupNodeHostnames()
|
||||
.then((nodes) => {
|
||||
if (this.connectionEpoch !== epoch) {
|
||||
debug("discard connecting after resolving startup nodes because epoch not match: %d != %d", epoch, this.connectionEpoch);
|
||||
reject(new redis_errors_1.RedisError("Connection is discarded because a new connection is made"));
|
||||
return;
|
||||
}
|
||||
if (this.status !== "connecting") {
|
||||
debug("discard connecting after resolving startup nodes because the status changed to %s", this.status);
|
||||
reject(new redis_errors_1.RedisError("Connection is aborted"));
|
||||
return;
|
||||
}
|
||||
this.connectionPool.reset(nodes);
|
||||
if (this.options.shardedSubscribers) {
|
||||
this.shardedSubscribers
|
||||
.reset(this.slots, this.connectionPool.getNodes("all"))
|
||||
.catch((err) => {
|
||||
// TODO should we emit an error event here?
|
||||
debug("Error while starting subscribers: %s", err);
|
||||
});
|
||||
}
|
||||
const readyHandler = () => {
|
||||
this.setStatus("ready");
|
||||
this.retryAttempts = 0;
|
||||
this.executeOfflineCommands();
|
||||
this.resetNodesRefreshInterval();
|
||||
resolve();
|
||||
};
|
||||
let closeListener = undefined;
|
||||
const refreshListener = () => {
|
||||
this.invokeReadyDelayedCallbacks(undefined);
|
||||
this.removeListener("close", closeListener);
|
||||
this.manuallyClosing = false;
|
||||
this.setStatus("connect");
|
||||
if (this.options.enableReadyCheck) {
|
||||
this.readyCheck((err, fail) => {
|
||||
if (err || fail) {
|
||||
debug("Ready check failed (%s). Reconnecting...", err || fail);
|
||||
if (this.status === "connect") {
|
||||
this.disconnect(true);
|
||||
}
|
||||
}
|
||||
else {
|
||||
readyHandler();
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
readyHandler();
|
||||
}
|
||||
};
|
||||
closeListener = () => {
|
||||
const error = new Error("None of startup nodes is available");
|
||||
this.removeListener("refresh", refreshListener);
|
||||
this.invokeReadyDelayedCallbacks(error);
|
||||
reject(error);
|
||||
};
|
||||
this.once("refresh", refreshListener);
|
||||
this.once("close", closeListener);
|
||||
this.once("close", this.handleCloseEvent.bind(this));
|
||||
this.refreshSlotsCache((err) => {
|
||||
if (err && err.message === ClusterAllFailedError_1.default.defaultMessage) {
|
||||
Redis_1.default.prototype.silentEmit.call(this, "error", err);
|
||||
this.connectionPool.reset([]);
|
||||
}
|
||||
});
|
||||
this.subscriber.start();
|
||||
if (this.options.shardedSubscribers) {
|
||||
this.shardedSubscribers.start().catch((err) => {
|
||||
// TODO should we emit an error event here?
|
||||
debug("Error while starting subscribers: %s", err);
|
||||
});
|
||||
}
|
||||
})
|
||||
.catch((err) => {
|
||||
this.setStatus("close");
|
||||
this.handleCloseEvent(err);
|
||||
this.invokeReadyDelayedCallbacks(err);
|
||||
reject(err);
|
||||
});
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Disconnect from every node in the cluster.
|
||||
*/
|
||||
disconnect(reconnect = false) {
|
||||
const status = this.status;
|
||||
this.setStatus("disconnecting");
|
||||
if (!reconnect) {
|
||||
this.manuallyClosing = true;
|
||||
}
|
||||
if (this.reconnectTimeout && !reconnect) {
|
||||
clearTimeout(this.reconnectTimeout);
|
||||
this.reconnectTimeout = null;
|
||||
debug("Canceled reconnecting attempts");
|
||||
}
|
||||
this.clearNodesRefreshInterval();
|
||||
this.subscriber.stop();
|
||||
if (this.options.shardedSubscribers) {
|
||||
this.shardedSubscribers.stop();
|
||||
}
|
||||
if (status === "wait") {
|
||||
this.setStatus("close");
|
||||
this.handleCloseEvent();
|
||||
}
|
||||
else {
|
||||
this.connectionPool.reset([]);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Quit the cluster gracefully.
|
||||
*/
|
||||
quit(callback) {
|
||||
const status = this.status;
|
||||
this.setStatus("disconnecting");
|
||||
this.manuallyClosing = true;
|
||||
if (this.reconnectTimeout) {
|
||||
clearTimeout(this.reconnectTimeout);
|
||||
this.reconnectTimeout = null;
|
||||
}
|
||||
this.clearNodesRefreshInterval();
|
||||
this.subscriber.stop();
|
||||
if (this.options.shardedSubscribers) {
|
||||
this.shardedSubscribers.stop();
|
||||
}
|
||||
if (status === "wait") {
|
||||
const ret = (0, standard_as_callback_1.default)(Promise.resolve("OK"), callback);
|
||||
// use setImmediate to make sure "close" event
|
||||
// being emitted after quit() is returned
|
||||
setImmediate(function () {
|
||||
this.setStatus("close");
|
||||
this.handleCloseEvent();
|
||||
}.bind(this));
|
||||
return ret;
|
||||
}
|
||||
return (0, standard_as_callback_1.default)(Promise.all(this.nodes().map((node) => node.quit().catch((err) => {
|
||||
// Ignore the error caused by disconnecting since
|
||||
// we're disconnecting...
|
||||
if (err.message === utils_1.CONNECTION_CLOSED_ERROR_MSG) {
|
||||
return "OK";
|
||||
}
|
||||
throw err;
|
||||
}))).then(() => "OK"), callback);
|
||||
}
|
||||
/**
|
||||
* Create a new instance with the same startup nodes and options as the current one.
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* var cluster = new Redis.Cluster([{ host: "127.0.0.1", port: "30001" }]);
|
||||
* var anotherCluster = cluster.duplicate();
|
||||
* ```
|
||||
*/
|
||||
duplicate(overrideStartupNodes = [], overrideOptions = {}) {
|
||||
const startupNodes = overrideStartupNodes.length > 0
|
||||
? overrideStartupNodes
|
||||
: this.startupNodes.slice(0);
|
||||
const options = Object.assign({}, this.options, overrideOptions);
|
||||
return new Cluster(startupNodes, options);
|
||||
}
|
||||
/**
|
||||
* Get nodes with the specified role
|
||||
*/
|
||||
nodes(role = "all") {
|
||||
if (role !== "all" && role !== "master" && role !== "slave") {
|
||||
throw new Error('Invalid role "' + role + '". Expected "all", "master" or "slave"');
|
||||
}
|
||||
return this.connectionPool.getNodes(role);
|
||||
}
|
||||
/**
|
||||
* This is needed in order not to install a listener for each auto pipeline
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
delayUntilReady(callback) {
|
||||
this._readyDelayedCallbacks.push(callback);
|
||||
}
|
||||
/**
|
||||
* Get the number of commands queued in automatic pipelines.
|
||||
*
|
||||
* This is not available (and returns 0) until the cluster is connected and slots information have been received.
|
||||
*/
|
||||
get autoPipelineQueueSize() {
|
||||
let queued = 0;
|
||||
for (const pipeline of this._autoPipelines.values()) {
|
||||
queued += pipeline.length;
|
||||
}
|
||||
return queued;
|
||||
}
|
||||
/**
|
||||
* Refresh the slot cache
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
refreshSlotsCache(callback) {
|
||||
if (callback) {
|
||||
this._refreshSlotsCacheCallbacks.push(callback);
|
||||
}
|
||||
if (this.isRefreshing) {
|
||||
return;
|
||||
}
|
||||
this.isRefreshing = true;
|
||||
const _this = this;
|
||||
const wrapper = (error) => {
|
||||
this.isRefreshing = false;
|
||||
for (const callback of this._refreshSlotsCacheCallbacks) {
|
||||
callback(error);
|
||||
}
|
||||
this._refreshSlotsCacheCallbacks = [];
|
||||
};
|
||||
const nodes = (0, utils_1.shuffle)(this.connectionPool.getNodes());
|
||||
let lastNodeError = null;
|
||||
function tryNode(index) {
|
||||
if (index === nodes.length) {
|
||||
const error = new ClusterAllFailedError_1.default(ClusterAllFailedError_1.default.defaultMessage, lastNodeError);
|
||||
return wrapper(error);
|
||||
}
|
||||
const node = nodes[index];
|
||||
const key = `${node.options.host}:${node.options.port}`;
|
||||
debug("getting slot cache from %s", key);
|
||||
_this.getInfoFromNode(node, function (err) {
|
||||
switch (_this.status) {
|
||||
case "close":
|
||||
case "end":
|
||||
return wrapper(new Error("Cluster is disconnected."));
|
||||
case "disconnecting":
|
||||
return wrapper(new Error("Cluster is disconnecting."));
|
||||
}
|
||||
if (err) {
|
||||
_this.emit("node error", err, key);
|
||||
lastNodeError = err;
|
||||
tryNode(index + 1);
|
||||
}
|
||||
else {
|
||||
_this.emit("refresh");
|
||||
wrapper();
|
||||
}
|
||||
});
|
||||
}
|
||||
tryNode(0);
|
||||
}
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
sendCommand(command, stream, node) {
|
||||
if (this.status === "wait") {
|
||||
this.connect().catch(utils_1.noop);
|
||||
}
|
||||
if (this.status === "end") {
|
||||
command.reject(new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG));
|
||||
return command.promise;
|
||||
}
|
||||
let to = this.options.scaleReads;
|
||||
if (to !== "master") {
|
||||
const isCommandReadOnly = command.isReadOnly ||
|
||||
((0, commands_1.exists)(command.name) && (0, commands_1.hasFlag)(command.name, "readonly"));
|
||||
if (!isCommandReadOnly) {
|
||||
to = "master";
|
||||
}
|
||||
}
|
||||
let targetSlot = node ? node.slot : command.getSlot();
|
||||
const ttl = {};
|
||||
const _this = this;
|
||||
if (!node && !REJECT_OVERWRITTEN_COMMANDS.has(command)) {
|
||||
REJECT_OVERWRITTEN_COMMANDS.add(command);
|
||||
const reject = command.reject;
|
||||
command.reject = function (err) {
|
||||
const partialTry = tryConnection.bind(null, true);
|
||||
_this.handleError(err, ttl, {
|
||||
moved: function (slot, key) {
|
||||
debug("command %s is moved to %s", command.name, key);
|
||||
targetSlot = Number(slot);
|
||||
if (_this.slots[slot]) {
|
||||
_this.slots[slot][0] = key;
|
||||
}
|
||||
else {
|
||||
_this.slots[slot] = [key];
|
||||
}
|
||||
_this._groupsBySlot[slot] =
|
||||
_this._groupsIds[_this.slots[slot].join(";")];
|
||||
_this.connectionPool.findOrCreate(_this.natMapper(key));
|
||||
tryConnection();
|
||||
debug("refreshing slot caches... (triggered by MOVED error)");
|
||||
_this.refreshSlotsCache();
|
||||
},
|
||||
ask: function (slot, key) {
|
||||
debug("command %s is required to ask %s:%s", command.name, key);
|
||||
const mapped = _this.natMapper(key);
|
||||
_this.connectionPool.findOrCreate(mapped);
|
||||
tryConnection(false, `${mapped.host}:${mapped.port}`);
|
||||
},
|
||||
tryagain: partialTry,
|
||||
clusterDown: partialTry,
|
||||
connectionClosed: partialTry,
|
||||
maxRedirections: function (redirectionError) {
|
||||
reject.call(command, redirectionError);
|
||||
},
|
||||
defaults: function () {
|
||||
reject.call(command, err);
|
||||
},
|
||||
});
|
||||
};
|
||||
}
|
||||
tryConnection();
|
||||
function tryConnection(random, asking) {
|
||||
if (_this.status === "end") {
|
||||
command.reject(new redis_errors_1.AbortError("Cluster is ended."));
|
||||
return;
|
||||
}
|
||||
let redis;
|
||||
if (_this.status === "ready" || command.name === "cluster") {
|
||||
if (node && node.redis) {
|
||||
redis = node.redis;
|
||||
}
|
||||
else if (Command_1.default.checkFlag("ENTER_SUBSCRIBER_MODE", command.name) ||
|
||||
Command_1.default.checkFlag("EXIT_SUBSCRIBER_MODE", command.name)) {
|
||||
if (_this.options.shardedSubscribers &&
|
||||
(command.name == "ssubscribe" || command.name == "sunsubscribe")) {
|
||||
const sub = _this.shardedSubscribers.getResponsibleSubscriber(targetSlot);
|
||||
if (!sub) {
|
||||
command.reject(new redis_errors_1.AbortError(`No sharded subscriber for slot: ${targetSlot}`));
|
||||
return;
|
||||
}
|
||||
let status = -1;
|
||||
if (command.name == "ssubscribe") {
|
||||
status = _this.shardedSubscribers.addChannels(command.getKeys());
|
||||
}
|
||||
if (command.name == "sunsubscribe") {
|
||||
status = _this.shardedSubscribers.removeChannels(command.getKeys());
|
||||
}
|
||||
if (status !== -1) {
|
||||
redis = sub.getInstance();
|
||||
}
|
||||
else {
|
||||
command.reject(new redis_errors_1.AbortError("Possible CROSSSLOT error: All channels must hash to the same slot"));
|
||||
}
|
||||
}
|
||||
else {
|
||||
redis = _this.subscriber.getInstance();
|
||||
}
|
||||
if (!redis) {
|
||||
command.reject(new redis_errors_1.AbortError("No subscriber for the cluster"));
|
||||
return;
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (!random) {
|
||||
if (typeof targetSlot === "number" && _this.slots[targetSlot]) {
|
||||
const nodeKeys = _this.slots[targetSlot];
|
||||
if (typeof to === "function") {
|
||||
const nodes = nodeKeys.map(function (key) {
|
||||
return _this.connectionPool.getInstanceByKey(key);
|
||||
});
|
||||
redis = to(nodes, command);
|
||||
if (Array.isArray(redis)) {
|
||||
redis = (0, utils_1.sample)(redis);
|
||||
}
|
||||
if (!redis) {
|
||||
redis = nodes[0];
|
||||
}
|
||||
}
|
||||
else {
|
||||
let key;
|
||||
if (to === "all") {
|
||||
key = (0, utils_1.sample)(nodeKeys);
|
||||
}
|
||||
else if (to === "slave" && nodeKeys.length > 1) {
|
||||
key = (0, utils_1.sample)(nodeKeys, 1);
|
||||
}
|
||||
else {
|
||||
key = nodeKeys[0];
|
||||
}
|
||||
redis = _this.connectionPool.getInstanceByKey(key);
|
||||
}
|
||||
}
|
||||
if (asking) {
|
||||
redis = _this.connectionPool.getInstanceByKey(asking);
|
||||
redis.asking();
|
||||
}
|
||||
}
|
||||
if (!redis) {
|
||||
redis =
|
||||
(typeof to === "function"
|
||||
? null
|
||||
: _this.connectionPool.getSampleInstance(to)) ||
|
||||
_this.connectionPool.getSampleInstance("all");
|
||||
}
|
||||
}
|
||||
if (node && !node.redis) {
|
||||
node.redis = redis;
|
||||
}
|
||||
}
|
||||
if (redis) {
|
||||
redis.sendCommand(command, stream);
|
||||
}
|
||||
else if (_this.options.enableOfflineQueue) {
|
||||
_this.offlineQueue.push({
|
||||
command: command,
|
||||
stream: stream,
|
||||
node: node,
|
||||
});
|
||||
}
|
||||
else {
|
||||
command.reject(new Error("Cluster isn't ready and enableOfflineQueue options is false"));
|
||||
}
|
||||
}
|
||||
return command.promise;
|
||||
}
|
||||
sscanStream(key, options) {
|
||||
return this.createScanStream("sscan", { key, options });
|
||||
}
|
||||
sscanBufferStream(key, options) {
|
||||
return this.createScanStream("sscanBuffer", { key, options });
|
||||
}
|
||||
hscanStream(key, options) {
|
||||
return this.createScanStream("hscan", { key, options });
|
||||
}
|
||||
hscanBufferStream(key, options) {
|
||||
return this.createScanStream("hscanBuffer", { key, options });
|
||||
}
|
||||
zscanStream(key, options) {
|
||||
return this.createScanStream("zscan", { key, options });
|
||||
}
|
||||
zscanBufferStream(key, options) {
|
||||
return this.createScanStream("zscanBuffer", { key, options });
|
||||
}
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
handleError(error, ttl, handlers) {
|
||||
if (typeof ttl.value === "undefined") {
|
||||
ttl.value = this.options.maxRedirections;
|
||||
}
|
||||
else {
|
||||
ttl.value -= 1;
|
||||
}
|
||||
if (ttl.value <= 0) {
|
||||
handlers.maxRedirections(new Error("Too many Cluster redirections. Last error: " + error));
|
||||
return;
|
||||
}
|
||||
const errv = error.message.split(" ");
|
||||
if (errv[0] === "MOVED") {
|
||||
const timeout = this.options.retryDelayOnMoved;
|
||||
if (timeout && typeof timeout === "number") {
|
||||
this.delayQueue.push("moved", handlers.moved.bind(null, errv[1], errv[2]), { timeout });
|
||||
}
|
||||
else {
|
||||
handlers.moved(errv[1], errv[2]);
|
||||
}
|
||||
}
|
||||
else if (errv[0] === "ASK") {
|
||||
handlers.ask(errv[1], errv[2]);
|
||||
}
|
||||
else if (errv[0] === "TRYAGAIN") {
|
||||
this.delayQueue.push("tryagain", handlers.tryagain, {
|
||||
timeout: this.options.retryDelayOnTryAgain,
|
||||
});
|
||||
}
|
||||
else if (errv[0] === "CLUSTERDOWN" &&
|
||||
this.options.retryDelayOnClusterDown > 0) {
|
||||
this.delayQueue.push("clusterdown", handlers.connectionClosed, {
|
||||
timeout: this.options.retryDelayOnClusterDown,
|
||||
callback: this.refreshSlotsCache.bind(this),
|
||||
});
|
||||
}
|
||||
else if (error.message === utils_1.CONNECTION_CLOSED_ERROR_MSG &&
|
||||
this.options.retryDelayOnFailover > 0 &&
|
||||
this.status === "ready") {
|
||||
this.delayQueue.push("failover", handlers.connectionClosed, {
|
||||
timeout: this.options.retryDelayOnFailover,
|
||||
callback: this.refreshSlotsCache.bind(this),
|
||||
});
|
||||
}
|
||||
else {
|
||||
handlers.defaults();
|
||||
}
|
||||
}
|
||||
resetOfflineQueue() {
|
||||
this.offlineQueue = new Deque();
|
||||
}
|
||||
clearNodesRefreshInterval() {
|
||||
if (this.slotsTimer) {
|
||||
clearTimeout(this.slotsTimer);
|
||||
this.slotsTimer = null;
|
||||
}
|
||||
}
|
||||
resetNodesRefreshInterval() {
|
||||
if (this.slotsTimer || !this.options.slotsRefreshInterval) {
|
||||
return;
|
||||
}
|
||||
const nextRound = () => {
|
||||
this.slotsTimer = setTimeout(() => {
|
||||
debug('refreshing slot caches... (triggered by "slotsRefreshInterval" option)');
|
||||
this.refreshSlotsCache(() => {
|
||||
nextRound();
|
||||
});
|
||||
}, this.options.slotsRefreshInterval);
|
||||
};
|
||||
nextRound();
|
||||
}
|
||||
/**
|
||||
* Change cluster instance's status
|
||||
*/
|
||||
setStatus(status) {
|
||||
debug("status: %s -> %s", this.status || "[empty]", status);
|
||||
this.status = status;
|
||||
process.nextTick(() => {
|
||||
this.emit(status);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Called when closed to check whether a reconnection should be made
|
||||
*/
|
||||
handleCloseEvent(reason) {
|
||||
var _a;
|
||||
if (reason) {
|
||||
debug("closed because %s", reason);
|
||||
}
|
||||
let retryDelay;
|
||||
if (!this.manuallyClosing &&
|
||||
typeof this.options.clusterRetryStrategy === "function") {
|
||||
retryDelay = this.options.clusterRetryStrategy.call(this, ++this.retryAttempts, reason);
|
||||
}
|
||||
if (typeof retryDelay === "number") {
|
||||
this.setStatus("reconnecting");
|
||||
this.reconnectTimeout = setTimeout(() => {
|
||||
this.reconnectTimeout = null;
|
||||
debug("Cluster is disconnected. Retrying after %dms", retryDelay);
|
||||
this.connect().catch(function (err) {
|
||||
debug("Got error %s when reconnecting. Ignoring...", err);
|
||||
});
|
||||
}, retryDelay);
|
||||
}
|
||||
else {
|
||||
if (this.options.shardedSubscribers) {
|
||||
(_a = this.subscriberGroupEmitter) === null || _a === void 0 ? void 0 : _a.removeAllListeners();
|
||||
}
|
||||
this.setStatus("end");
|
||||
this.flushQueue(new Error("None of startup nodes is available"));
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Flush offline queue with error.
|
||||
*/
|
||||
flushQueue(error) {
|
||||
let item;
|
||||
while ((item = this.offlineQueue.shift())) {
|
||||
item.command.reject(error);
|
||||
}
|
||||
}
|
||||
executeOfflineCommands() {
|
||||
if (this.offlineQueue.length) {
|
||||
debug("send %d commands in offline queue", this.offlineQueue.length);
|
||||
const offlineQueue = this.offlineQueue;
|
||||
this.resetOfflineQueue();
|
||||
let item;
|
||||
while ((item = offlineQueue.shift())) {
|
||||
this.sendCommand(item.command, item.stream, item.node);
|
||||
}
|
||||
}
|
||||
}
|
||||
natMapper(nodeKey) {
|
||||
const key = typeof nodeKey === "string"
|
||||
? nodeKey
|
||||
: `${nodeKey.host}:${nodeKey.port}`;
|
||||
let mapped = null;
|
||||
if (this.options.natMap && typeof this.options.natMap === "function") {
|
||||
mapped = this.options.natMap(key);
|
||||
}
|
||||
else if (this.options.natMap && typeof this.options.natMap === "object") {
|
||||
mapped = this.options.natMap[key];
|
||||
}
|
||||
if (mapped) {
|
||||
debug("NAT mapping %s -> %O", key, mapped);
|
||||
return Object.assign({}, mapped);
|
||||
}
|
||||
return typeof nodeKey === "string"
|
||||
? (0, util_1.nodeKeyToRedisOptions)(nodeKey)
|
||||
: nodeKey;
|
||||
}
|
||||
getInfoFromNode(redis, callback) {
|
||||
if (!redis) {
|
||||
return callback(new Error("Node is disconnected"));
|
||||
}
|
||||
// Use a duplication of the connection to avoid
|
||||
// timeouts when the connection is in the blocking
|
||||
// mode (e.g. waiting for BLPOP).
|
||||
const duplicatedConnection = redis.duplicate({
|
||||
enableOfflineQueue: true,
|
||||
enableReadyCheck: false,
|
||||
retryStrategy: null,
|
||||
connectionName: (0, util_1.getConnectionName)("refresher", this.options.redisOptions && this.options.redisOptions.connectionName),
|
||||
});
|
||||
// Ignore error events since we will handle
|
||||
// exceptions for the CLUSTER SLOTS command.
|
||||
duplicatedConnection.on("error", utils_1.noop);
|
||||
duplicatedConnection.cluster("SLOTS", (0, utils_1.timeout)((err, result) => {
|
||||
duplicatedConnection.disconnect();
|
||||
if (err) {
|
||||
debug("error encountered running CLUSTER.SLOTS: %s", err);
|
||||
return callback(err);
|
||||
}
|
||||
if (this.status === "disconnecting" ||
|
||||
this.status === "close" ||
|
||||
this.status === "end") {
|
||||
debug("ignore CLUSTER.SLOTS results (count: %d) since cluster status is %s", result.length, this.status);
|
||||
callback();
|
||||
return;
|
||||
}
|
||||
const nodes = [];
|
||||
debug("cluster slots result count: %d", result.length);
|
||||
for (let i = 0; i < result.length; ++i) {
|
||||
const items = result[i];
|
||||
const slotRangeStart = items[0];
|
||||
const slotRangeEnd = items[1];
|
||||
const keys = [];
|
||||
for (let j = 2; j < items.length; j++) {
|
||||
if (!items[j][0]) {
|
||||
continue;
|
||||
}
|
||||
const node = this.natMapper({
|
||||
host: items[j][0],
|
||||
port: items[j][1],
|
||||
});
|
||||
node.readOnly = j !== 2;
|
||||
nodes.push(node);
|
||||
keys.push(node.host + ":" + node.port);
|
||||
}
|
||||
debug("cluster slots result [%d]: slots %d~%d served by %s", i, slotRangeStart, slotRangeEnd, keys);
|
||||
for (let slot = slotRangeStart; slot <= slotRangeEnd; slot++) {
|
||||
this.slots[slot] = keys;
|
||||
}
|
||||
}
|
||||
// Assign to each node keys a numeric value to make autopipeline comparison faster.
|
||||
this._groupsIds = Object.create(null);
|
||||
let j = 0;
|
||||
for (let i = 0; i < 16384; i++) {
|
||||
const target = (this.slots[i] || []).join(";");
|
||||
if (!target.length) {
|
||||
this._groupsBySlot[i] = undefined;
|
||||
continue;
|
||||
}
|
||||
if (!this._groupsIds[target]) {
|
||||
this._groupsIds[target] = ++j;
|
||||
}
|
||||
this._groupsBySlot[i] = this._groupsIds[target];
|
||||
}
|
||||
this.connectionPool.reset(nodes);
|
||||
if (this.options.shardedSubscribers) {
|
||||
this.shardedSubscribers
|
||||
.reset(this.slots, this.connectionPool.getNodes("all"))
|
||||
.catch((err) => {
|
||||
// TODO should we emit an error event here?
|
||||
debug("Error while starting subscribers: %s", err);
|
||||
});
|
||||
}
|
||||
callback();
|
||||
}, this.options.slotsRefreshTimeout));
|
||||
}
|
||||
invokeReadyDelayedCallbacks(err) {
|
||||
for (const c of this._readyDelayedCallbacks) {
|
||||
process.nextTick(c, err);
|
||||
}
|
||||
this._readyDelayedCallbacks = [];
|
||||
}
|
||||
/**
|
||||
* Check whether Cluster is able to process commands
|
||||
*/
|
||||
readyCheck(callback) {
|
||||
this.cluster("INFO", (err, res) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
if (typeof res !== "string") {
|
||||
return callback();
|
||||
}
|
||||
let state;
|
||||
const lines = res.split("\r\n");
|
||||
for (let i = 0; i < lines.length; ++i) {
|
||||
const parts = lines[i].split(":");
|
||||
if (parts[0] === "cluster_state") {
|
||||
state = parts[1];
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (state === "fail") {
|
||||
debug("cluster state not ok (%s)", state);
|
||||
callback(null, state);
|
||||
}
|
||||
else {
|
||||
callback();
|
||||
}
|
||||
});
|
||||
}
|
||||
resolveSrv(hostname) {
|
||||
return new Promise((resolve, reject) => {
|
||||
this.options.resolveSrv(hostname, (err, records) => {
|
||||
if (err) {
|
||||
return reject(err);
|
||||
}
|
||||
const self = this, groupedRecords = (0, util_1.groupSrvRecords)(records), sortedKeys = Object.keys(groupedRecords).sort((a, b) => parseInt(a) - parseInt(b));
|
||||
function tryFirstOne(err) {
|
||||
if (!sortedKeys.length) {
|
||||
return reject(err);
|
||||
}
|
||||
const key = sortedKeys[0], group = groupedRecords[key], record = (0, util_1.weightSrvRecords)(group);
|
||||
if (!group.records.length) {
|
||||
sortedKeys.shift();
|
||||
}
|
||||
self.dnsLookup(record.name).then((host) => resolve({
|
||||
host,
|
||||
port: record.port,
|
||||
}), tryFirstOne);
|
||||
}
|
||||
tryFirstOne();
|
||||
});
|
||||
});
|
||||
}
|
||||
dnsLookup(hostname) {
|
||||
return new Promise((resolve, reject) => {
|
||||
this.options.dnsLookup(hostname, (err, address) => {
|
||||
if (err) {
|
||||
debug("failed to resolve hostname %s to IP: %s", hostname, err.message);
|
||||
reject(err);
|
||||
}
|
||||
else {
|
||||
debug("resolved hostname %s to IP %s", hostname, address);
|
||||
resolve(address);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Normalize startup nodes, and resolving hostnames to IPs.
|
||||
*
|
||||
* This process happens every time when #connect() is called since
|
||||
* #startupNodes and DNS records may chanage.
|
||||
*/
|
||||
async resolveStartupNodeHostnames() {
|
||||
if (!Array.isArray(this.startupNodes) || this.startupNodes.length === 0) {
|
||||
throw new Error("`startupNodes` should contain at least one node.");
|
||||
}
|
||||
const startupNodes = (0, util_1.normalizeNodeOptions)(this.startupNodes);
|
||||
const hostnames = (0, util_1.getUniqueHostnamesFromOptions)(startupNodes);
|
||||
if (hostnames.length === 0) {
|
||||
return startupNodes;
|
||||
}
|
||||
const configs = await Promise.all(hostnames.map((this.options.useSRVRecords ? this.resolveSrv : this.dnsLookup).bind(this)));
|
||||
const hostnameToConfig = (0, utils_1.zipMap)(hostnames, configs);
|
||||
return startupNodes.map((node) => {
|
||||
const config = hostnameToConfig.get(node.host);
|
||||
if (!config) {
|
||||
return node;
|
||||
}
|
||||
if (this.options.useSRVRecords) {
|
||||
return Object.assign({}, node, config);
|
||||
}
|
||||
return Object.assign({}, node, { host: config });
|
||||
});
|
||||
}
|
||||
createScanStream(command, { key, options = {} }) {
|
||||
return new ScanStream_1.default({
|
||||
objectMode: true,
|
||||
key: key,
|
||||
redis: this,
|
||||
command: command,
|
||||
...options,
|
||||
});
|
||||
}
|
||||
createShardedSubscriberGroup() {
|
||||
this.subscriberGroupEmitter = new events_1.EventEmitter();
|
||||
this.shardedSubscribers = new ClusterSubscriberGroup_1.default(this.subscriberGroupEmitter, this.options);
|
||||
// Error handler used only for sharded-subscriber-triggered slots cache refreshes.
|
||||
// Normal (non-subscriber) connections are created with lazyConnect: true and can
|
||||
// become zombied. For sharded subscribers, a ClusterAllFailedError means
|
||||
// we have lost all nodes from the subscriber perspective and must tear down.
|
||||
const refreshSlotsCacheCallback = (err) => {
|
||||
// Disconnect only when refreshing the slots cache fails with ClusterAllFailedError
|
||||
if (err instanceof ClusterAllFailedError_1.default) {
|
||||
this.disconnect(true);
|
||||
}
|
||||
};
|
||||
this.subscriberGroupEmitter.on("-node", (redis, nodeKey) => {
|
||||
this.emit("-node", redis, nodeKey);
|
||||
this.refreshSlotsCache(refreshSlotsCacheCallback);
|
||||
});
|
||||
this.subscriberGroupEmitter.on("subscriberConnectFailed", ({ delay, error }) => {
|
||||
this.emit("error", error);
|
||||
setTimeout(() => {
|
||||
this.refreshSlotsCache(refreshSlotsCacheCallback);
|
||||
}, delay);
|
||||
});
|
||||
this.subscriberGroupEmitter.on("moved", () => {
|
||||
this.refreshSlotsCache(refreshSlotsCacheCallback);
|
||||
});
|
||||
this.subscriberGroupEmitter.on("-subscriber", () => {
|
||||
this.emit("-subscriber");
|
||||
});
|
||||
this.subscriberGroupEmitter.on("+subscriber", () => {
|
||||
this.emit("+subscriber");
|
||||
});
|
||||
this.subscriberGroupEmitter.on("nodeError", (error, nodeKey) => {
|
||||
this.emit("nodeError", error, nodeKey);
|
||||
});
|
||||
this.subscriberGroupEmitter.on("subscribersReady", () => {
|
||||
this.emit("subscribersReady");
|
||||
});
|
||||
for (const event of ["smessage", "smessageBuffer"]) {
|
||||
this.subscriberGroupEmitter.on(event, (arg1, arg2, arg3) => {
|
||||
this.emit(event, arg1, arg2, arg3);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
(0, applyMixin_1.default)(Cluster, events_1.EventEmitter);
|
||||
(0, transaction_1.addTransactionSupport)(Cluster.prototype);
|
||||
exports.default = Cluster;
|
||||
25
node_modules/ioredis/built/cluster/util.d.ts
generated
vendored
Normal file
25
node_modules/ioredis/built/cluster/util.d.ts
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
/// <reference types="node" />
|
||||
import { SrvRecord } from "dns";
|
||||
export declare type NodeKey = string;
|
||||
export declare type NodeRole = "master" | "slave" | "all";
|
||||
export interface RedisOptions {
|
||||
port: number;
|
||||
host: string;
|
||||
username?: string | undefined;
|
||||
password?: string | undefined;
|
||||
[key: string]: any;
|
||||
}
|
||||
export interface SrvRecordsGroup {
|
||||
totalWeight: number;
|
||||
records: SrvRecord[];
|
||||
}
|
||||
export interface GroupedSrvRecords {
|
||||
[key: number]: SrvRecordsGroup;
|
||||
}
|
||||
export declare function getNodeKey(node: RedisOptions): NodeKey;
|
||||
export declare function nodeKeyToRedisOptions(nodeKey: NodeKey): RedisOptions;
|
||||
export declare function normalizeNodeOptions(nodes: Array<string | number | object>): RedisOptions[];
|
||||
export declare function getUniqueHostnamesFromOptions(nodes: RedisOptions[]): string[];
|
||||
export declare function groupSrvRecords(records: SrvRecord[]): GroupedSrvRecords;
|
||||
export declare function weightSrvRecords(recordsGroup: SrvRecordsGroup): SrvRecord;
|
||||
export declare function getConnectionName(component: any, nodeConnectionName: any): string;
|
||||
100
node_modules/ioredis/built/cluster/util.js
generated
vendored
Normal file
100
node_modules/ioredis/built/cluster/util.js
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getConnectionName = exports.weightSrvRecords = exports.groupSrvRecords = exports.getUniqueHostnamesFromOptions = exports.normalizeNodeOptions = exports.nodeKeyToRedisOptions = exports.getNodeKey = void 0;
|
||||
const utils_1 = require("../utils");
|
||||
const net_1 = require("net");
|
||||
function getNodeKey(node) {
|
||||
node.port = node.port || 6379;
|
||||
node.host = node.host || "127.0.0.1";
|
||||
return node.host + ":" + node.port;
|
||||
}
|
||||
exports.getNodeKey = getNodeKey;
|
||||
function nodeKeyToRedisOptions(nodeKey) {
|
||||
const portIndex = nodeKey.lastIndexOf(":");
|
||||
if (portIndex === -1) {
|
||||
throw new Error(`Invalid node key ${nodeKey}`);
|
||||
}
|
||||
return {
|
||||
host: nodeKey.slice(0, portIndex),
|
||||
port: Number(nodeKey.slice(portIndex + 1)),
|
||||
};
|
||||
}
|
||||
exports.nodeKeyToRedisOptions = nodeKeyToRedisOptions;
|
||||
function normalizeNodeOptions(nodes) {
|
||||
return nodes.map((node) => {
|
||||
const options = {};
|
||||
if (typeof node === "object") {
|
||||
Object.assign(options, node);
|
||||
}
|
||||
else if (typeof node === "string") {
|
||||
Object.assign(options, (0, utils_1.parseURL)(node));
|
||||
}
|
||||
else if (typeof node === "number") {
|
||||
options.port = node;
|
||||
}
|
||||
else {
|
||||
throw new Error("Invalid argument " + node);
|
||||
}
|
||||
if (typeof options.port === "string") {
|
||||
options.port = parseInt(options.port, 10);
|
||||
}
|
||||
// Cluster mode only support db 0
|
||||
delete options.db;
|
||||
if (!options.port) {
|
||||
options.port = 6379;
|
||||
}
|
||||
if (!options.host) {
|
||||
options.host = "127.0.0.1";
|
||||
}
|
||||
return (0, utils_1.resolveTLSProfile)(options);
|
||||
});
|
||||
}
|
||||
exports.normalizeNodeOptions = normalizeNodeOptions;
|
||||
function getUniqueHostnamesFromOptions(nodes) {
|
||||
const uniqueHostsMap = {};
|
||||
nodes.forEach((node) => {
|
||||
uniqueHostsMap[node.host] = true;
|
||||
});
|
||||
return Object.keys(uniqueHostsMap).filter((host) => !(0, net_1.isIP)(host));
|
||||
}
|
||||
exports.getUniqueHostnamesFromOptions = getUniqueHostnamesFromOptions;
|
||||
function groupSrvRecords(records) {
|
||||
const recordsByPriority = {};
|
||||
for (const record of records) {
|
||||
if (!recordsByPriority.hasOwnProperty(record.priority)) {
|
||||
recordsByPriority[record.priority] = {
|
||||
totalWeight: record.weight,
|
||||
records: [record],
|
||||
};
|
||||
}
|
||||
else {
|
||||
recordsByPriority[record.priority].totalWeight += record.weight;
|
||||
recordsByPriority[record.priority].records.push(record);
|
||||
}
|
||||
}
|
||||
return recordsByPriority;
|
||||
}
|
||||
exports.groupSrvRecords = groupSrvRecords;
|
||||
function weightSrvRecords(recordsGroup) {
|
||||
if (recordsGroup.records.length === 1) {
|
||||
recordsGroup.totalWeight = 0;
|
||||
return recordsGroup.records.shift();
|
||||
}
|
||||
// + `recordsGroup.records.length` to support `weight` 0
|
||||
const random = Math.floor(Math.random() * (recordsGroup.totalWeight + recordsGroup.records.length));
|
||||
let total = 0;
|
||||
for (const [i, record] of recordsGroup.records.entries()) {
|
||||
total += 1 + record.weight;
|
||||
if (total > random) {
|
||||
recordsGroup.totalWeight -= record.weight;
|
||||
recordsGroup.records.splice(i, 1);
|
||||
return record;
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.weightSrvRecords = weightSrvRecords;
|
||||
function getConnectionName(component, nodeConnectionName) {
|
||||
const prefix = `ioredis-cluster(${component})`;
|
||||
return nodeConnectionName ? `${prefix}:${nodeConnectionName}` : prefix;
|
||||
}
|
||||
exports.getConnectionName = getConnectionName;
|
||||
12
node_modules/ioredis/built/connectors/AbstractConnector.d.ts
generated
vendored
Normal file
12
node_modules/ioredis/built/connectors/AbstractConnector.d.ts
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
import { NetStream } from "../types";
|
||||
export declare type ErrorEmitter = (type: string, err: Error) => void;
|
||||
export default abstract class AbstractConnector {
|
||||
firstError?: Error;
|
||||
protected connecting: boolean;
|
||||
protected stream: NetStream;
|
||||
private disconnectTimeout;
|
||||
constructor(disconnectTimeout: number);
|
||||
check(info: any): boolean;
|
||||
disconnect(): void;
|
||||
abstract connect(_: ErrorEmitter): Promise<NetStream>;
|
||||
}
|
||||
26
node_modules/ioredis/built/connectors/AbstractConnector.js
generated
vendored
Normal file
26
node_modules/ioredis/built/connectors/AbstractConnector.js
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const utils_1 = require("../utils");
|
||||
const debug = (0, utils_1.Debug)("AbstractConnector");
|
||||
class AbstractConnector {
|
||||
constructor(disconnectTimeout) {
|
||||
this.connecting = false;
|
||||
this.disconnectTimeout = disconnectTimeout;
|
||||
}
|
||||
check(info) {
|
||||
return true;
|
||||
}
|
||||
disconnect() {
|
||||
this.connecting = false;
|
||||
if (this.stream) {
|
||||
const stream = this.stream; // Make sure callbacks refer to the same instance
|
||||
const timeout = setTimeout(() => {
|
||||
debug("stream %s:%s still open, destroying it", stream.remoteAddress, stream.remotePort);
|
||||
stream.destroy();
|
||||
}, this.disconnectTimeout);
|
||||
stream.on("close", () => clearTimeout(timeout));
|
||||
stream.end();
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.default = AbstractConnector;
|
||||
5
node_modules/ioredis/built/connectors/ConnectorConstructor.d.ts
generated
vendored
Normal file
5
node_modules/ioredis/built/connectors/ConnectorConstructor.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
import AbstractConnector from "./AbstractConnector";
|
||||
interface ConnectorConstructor {
|
||||
new (options: unknown): AbstractConnector;
|
||||
}
|
||||
export default ConnectorConstructor;
|
||||
2
node_modules/ioredis/built/connectors/ConnectorConstructor.js
generated
vendored
Normal file
2
node_modules/ioredis/built/connectors/ConnectorConstructor.js
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
11
node_modules/ioredis/built/connectors/SentinelConnector/FailoverDetector.d.ts
generated
vendored
Normal file
11
node_modules/ioredis/built/connectors/SentinelConnector/FailoverDetector.d.ts
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
import SentinelConnector from "./index";
|
||||
import { Sentinel } from "./types";
|
||||
export declare class FailoverDetector {
|
||||
private connector;
|
||||
private sentinels;
|
||||
private isDisconnected;
|
||||
constructor(connector: SentinelConnector, sentinels: Sentinel[]);
|
||||
cleanup(): void;
|
||||
subscribe(): Promise<void>;
|
||||
private disconnect;
|
||||
}
|
||||
45
node_modules/ioredis/built/connectors/SentinelConnector/FailoverDetector.js
generated
vendored
Normal file
45
node_modules/ioredis/built/connectors/SentinelConnector/FailoverDetector.js
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.FailoverDetector = void 0;
|
||||
const utils_1 = require("../../utils");
|
||||
const debug = (0, utils_1.Debug)("FailoverDetector");
|
||||
const CHANNEL_NAME = "+switch-master";
|
||||
class FailoverDetector {
|
||||
// sentinels can't be used for regular commands after this
|
||||
constructor(connector, sentinels) {
|
||||
this.isDisconnected = false;
|
||||
this.connector = connector;
|
||||
this.sentinels = sentinels;
|
||||
}
|
||||
cleanup() {
|
||||
this.isDisconnected = true;
|
||||
for (const sentinel of this.sentinels) {
|
||||
sentinel.client.disconnect();
|
||||
}
|
||||
}
|
||||
async subscribe() {
|
||||
debug("Starting FailoverDetector");
|
||||
const promises = [];
|
||||
for (const sentinel of this.sentinels) {
|
||||
const promise = sentinel.client.subscribe(CHANNEL_NAME).catch((err) => {
|
||||
debug("Failed to subscribe to failover messages on sentinel %s:%s (%s)", sentinel.address.host || "127.0.0.1", sentinel.address.port || 26739, err.message);
|
||||
});
|
||||
promises.push(promise);
|
||||
sentinel.client.on("message", (channel) => {
|
||||
if (!this.isDisconnected && channel === CHANNEL_NAME) {
|
||||
this.disconnect();
|
||||
}
|
||||
});
|
||||
}
|
||||
await Promise.all(promises);
|
||||
}
|
||||
disconnect() {
|
||||
// Avoid disconnecting more than once per failover.
|
||||
// A new FailoverDetector will be created after reconnecting.
|
||||
this.isDisconnected = true;
|
||||
debug("Failover detected, disconnecting");
|
||||
// Will call this.cleanup()
|
||||
this.connector.disconnect();
|
||||
}
|
||||
}
|
||||
exports.FailoverDetector = FailoverDetector;
|
||||
13
node_modules/ioredis/built/connectors/SentinelConnector/SentinelIterator.d.ts
generated
vendored
Normal file
13
node_modules/ioredis/built/connectors/SentinelConnector/SentinelIterator.d.ts
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
import { SentinelAddress } from "./types";
|
||||
export default class SentinelIterator implements Iterator<Partial<SentinelAddress>> {
|
||||
private cursor;
|
||||
private sentinels;
|
||||
constructor(sentinels: Array<Partial<SentinelAddress>>);
|
||||
next(): {
|
||||
done: boolean;
|
||||
value: Partial<SentinelAddress>;
|
||||
};
|
||||
reset(moveCurrentEndpointToFirst: boolean): void;
|
||||
add(sentinel: SentinelAddress): boolean;
|
||||
toString(): string;
|
||||
}
|
||||
37
node_modules/ioredis/built/connectors/SentinelConnector/SentinelIterator.js
generated
vendored
Normal file
37
node_modules/ioredis/built/connectors/SentinelConnector/SentinelIterator.js
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
function isSentinelEql(a, b) {
|
||||
return ((a.host || "127.0.0.1") === (b.host || "127.0.0.1") &&
|
||||
(a.port || 26379) === (b.port || 26379));
|
||||
}
|
||||
class SentinelIterator {
|
||||
constructor(sentinels) {
|
||||
this.cursor = 0;
|
||||
this.sentinels = sentinels.slice(0);
|
||||
}
|
||||
next() {
|
||||
const done = this.cursor >= this.sentinels.length;
|
||||
return { done, value: done ? undefined : this.sentinels[this.cursor++] };
|
||||
}
|
||||
reset(moveCurrentEndpointToFirst) {
|
||||
if (moveCurrentEndpointToFirst &&
|
||||
this.sentinels.length > 1 &&
|
||||
this.cursor !== 1) {
|
||||
this.sentinels.unshift(...this.sentinels.splice(this.cursor - 1));
|
||||
}
|
||||
this.cursor = 0;
|
||||
}
|
||||
add(sentinel) {
|
||||
for (let i = 0; i < this.sentinels.length; i++) {
|
||||
if (isSentinelEql(sentinel, this.sentinels[i])) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
this.sentinels.push(sentinel);
|
||||
return true;
|
||||
}
|
||||
toString() {
|
||||
return `${JSON.stringify(this.sentinels)} @${this.cursor}`;
|
||||
}
|
||||
}
|
||||
exports.default = SentinelIterator;
|
||||
72
node_modules/ioredis/built/connectors/SentinelConnector/index.d.ts
generated
vendored
Normal file
72
node_modules/ioredis/built/connectors/SentinelConnector/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
/// <reference types="node" />
|
||||
import { EventEmitter } from "events";
|
||||
import { NatMap } from "../../cluster/ClusterOptions";
|
||||
import { ConnectionOptions } from "tls";
|
||||
import SentinelIterator from "./SentinelIterator";
|
||||
import { SentinelAddress } from "./types";
|
||||
import AbstractConnector, { ErrorEmitter } from "../AbstractConnector";
|
||||
import { NetStream } from "../../types";
|
||||
interface AddressFromResponse {
|
||||
port: string;
|
||||
ip: string;
|
||||
flags?: string | undefined;
|
||||
}
|
||||
declare type PreferredSlaves = ((slaves: AddressFromResponse[]) => AddressFromResponse | null) | Array<{
|
||||
port: string;
|
||||
ip: string;
|
||||
prio?: number | undefined;
|
||||
}> | {
|
||||
port: string;
|
||||
ip: string;
|
||||
prio?: number | undefined;
|
||||
};
|
||||
export { SentinelAddress, SentinelIterator };
|
||||
export interface SentinelConnectionOptions {
|
||||
/**
|
||||
* Master group name of the Sentinel
|
||||
*/
|
||||
name?: string | undefined;
|
||||
/**
|
||||
* @default "master"
|
||||
*/
|
||||
role?: "master" | "slave" | undefined;
|
||||
tls?: ConnectionOptions | undefined;
|
||||
sentinelUsername?: string | undefined;
|
||||
sentinelPassword?: string | undefined;
|
||||
sentinels?: Array<Partial<SentinelAddress>> | undefined;
|
||||
sentinelRetryStrategy?: ((retryAttempts: number) => number | void | null) | undefined;
|
||||
sentinelReconnectStrategy?: ((retryAttempts: number) => number | void | null) | undefined;
|
||||
preferredSlaves?: PreferredSlaves | undefined;
|
||||
connectTimeout?: number | undefined;
|
||||
disconnectTimeout?: number | undefined;
|
||||
sentinelCommandTimeout?: number | undefined;
|
||||
enableTLSForSentinelMode?: boolean | undefined;
|
||||
sentinelTLS?: ConnectionOptions | undefined;
|
||||
natMap?: NatMap | undefined;
|
||||
updateSentinels?: boolean | undefined;
|
||||
/**
|
||||
* @default 10
|
||||
*/
|
||||
sentinelMaxConnections?: number | undefined;
|
||||
failoverDetector?: boolean | undefined;
|
||||
}
|
||||
export default class SentinelConnector extends AbstractConnector {
|
||||
protected options: SentinelConnectionOptions;
|
||||
emitter: EventEmitter | null;
|
||||
protected sentinelIterator: SentinelIterator;
|
||||
private retryAttempts;
|
||||
private failoverDetector;
|
||||
constructor(options: SentinelConnectionOptions);
|
||||
check(info: {
|
||||
role?: string;
|
||||
}): boolean;
|
||||
disconnect(): void;
|
||||
connect(eventEmitter: ErrorEmitter): Promise<NetStream>;
|
||||
private updateSentinels;
|
||||
private resolveMaster;
|
||||
private resolveSlave;
|
||||
private sentinelNatResolve;
|
||||
private connectToSentinel;
|
||||
private resolve;
|
||||
private initFailoverDetector;
|
||||
}
|
||||
305
node_modules/ioredis/built/connectors/SentinelConnector/index.js
generated
vendored
Normal file
305
node_modules/ioredis/built/connectors/SentinelConnector/index.js
generated
vendored
Normal file
@@ -0,0 +1,305 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.SentinelIterator = void 0;
|
||||
const net_1 = require("net");
|
||||
const utils_1 = require("../../utils");
|
||||
const tls_1 = require("tls");
|
||||
const SentinelIterator_1 = require("./SentinelIterator");
|
||||
exports.SentinelIterator = SentinelIterator_1.default;
|
||||
const AbstractConnector_1 = require("../AbstractConnector");
|
||||
const Redis_1 = require("../../Redis");
|
||||
const FailoverDetector_1 = require("./FailoverDetector");
|
||||
const debug = (0, utils_1.Debug)("SentinelConnector");
|
||||
class SentinelConnector extends AbstractConnector_1.default {
|
||||
constructor(options) {
|
||||
super(options.disconnectTimeout);
|
||||
this.options = options;
|
||||
this.emitter = null;
|
||||
this.failoverDetector = null;
|
||||
if (!this.options.sentinels.length) {
|
||||
throw new Error("Requires at least one sentinel to connect to.");
|
||||
}
|
||||
if (!this.options.name) {
|
||||
throw new Error("Requires the name of master.");
|
||||
}
|
||||
this.sentinelIterator = new SentinelIterator_1.default(this.options.sentinels);
|
||||
}
|
||||
check(info) {
|
||||
const roleMatches = !info.role || this.options.role === info.role;
|
||||
if (!roleMatches) {
|
||||
debug("role invalid, expected %s, but got %s", this.options.role, info.role);
|
||||
// Start from the next item.
|
||||
// Note that `reset` will move the cursor to the previous element,
|
||||
// so we advance two steps here.
|
||||
this.sentinelIterator.next();
|
||||
this.sentinelIterator.next();
|
||||
this.sentinelIterator.reset(true);
|
||||
}
|
||||
return roleMatches;
|
||||
}
|
||||
disconnect() {
|
||||
super.disconnect();
|
||||
if (this.failoverDetector) {
|
||||
this.failoverDetector.cleanup();
|
||||
}
|
||||
}
|
||||
connect(eventEmitter) {
|
||||
this.connecting = true;
|
||||
this.retryAttempts = 0;
|
||||
let lastError;
|
||||
const connectToNext = async () => {
|
||||
const endpoint = this.sentinelIterator.next();
|
||||
if (endpoint.done) {
|
||||
this.sentinelIterator.reset(false);
|
||||
const retryDelay = typeof this.options.sentinelRetryStrategy === "function"
|
||||
? this.options.sentinelRetryStrategy(++this.retryAttempts)
|
||||
: null;
|
||||
let errorMsg = typeof retryDelay !== "number"
|
||||
? "All sentinels are unreachable and retry is disabled."
|
||||
: `All sentinels are unreachable. Retrying from scratch after ${retryDelay}ms.`;
|
||||
if (lastError) {
|
||||
errorMsg += ` Last error: ${lastError.message}`;
|
||||
}
|
||||
debug(errorMsg);
|
||||
const error = new Error(errorMsg);
|
||||
if (typeof retryDelay === "number") {
|
||||
eventEmitter("error", error);
|
||||
await new Promise((resolve) => setTimeout(resolve, retryDelay));
|
||||
return connectToNext();
|
||||
}
|
||||
else {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
let resolved = null;
|
||||
let err = null;
|
||||
try {
|
||||
resolved = await this.resolve(endpoint.value);
|
||||
}
|
||||
catch (error) {
|
||||
err = error;
|
||||
}
|
||||
if (!this.connecting) {
|
||||
throw new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG);
|
||||
}
|
||||
const endpointAddress = endpoint.value.host + ":" + endpoint.value.port;
|
||||
if (resolved) {
|
||||
debug("resolved: %s:%s from sentinel %s", resolved.host, resolved.port, endpointAddress);
|
||||
if (this.options.enableTLSForSentinelMode && this.options.tls) {
|
||||
Object.assign(resolved, this.options.tls);
|
||||
this.stream = (0, tls_1.connect)(resolved);
|
||||
this.stream.once("secureConnect", this.initFailoverDetector.bind(this));
|
||||
}
|
||||
else {
|
||||
this.stream = (0, net_1.createConnection)(resolved);
|
||||
this.stream.once("connect", this.initFailoverDetector.bind(this));
|
||||
}
|
||||
this.stream.once("error", (err) => {
|
||||
this.firstError = err;
|
||||
});
|
||||
return this.stream;
|
||||
}
|
||||
else {
|
||||
const errorMsg = err
|
||||
? "failed to connect to sentinel " +
|
||||
endpointAddress +
|
||||
" because " +
|
||||
err.message
|
||||
: "connected to sentinel " +
|
||||
endpointAddress +
|
||||
" successfully, but got an invalid reply: " +
|
||||
resolved;
|
||||
debug(errorMsg);
|
||||
eventEmitter("sentinelError", new Error(errorMsg));
|
||||
if (err) {
|
||||
lastError = err;
|
||||
}
|
||||
return connectToNext();
|
||||
}
|
||||
};
|
||||
return connectToNext();
|
||||
}
|
||||
async updateSentinels(client) {
|
||||
if (!this.options.updateSentinels) {
|
||||
return;
|
||||
}
|
||||
const result = await client.sentinel("sentinels", this.options.name);
|
||||
if (!Array.isArray(result)) {
|
||||
return;
|
||||
}
|
||||
result
|
||||
.map(utils_1.packObject)
|
||||
.forEach((sentinel) => {
|
||||
const flags = sentinel.flags ? sentinel.flags.split(",") : [];
|
||||
if (flags.indexOf("disconnected") === -1 &&
|
||||
sentinel.ip &&
|
||||
sentinel.port) {
|
||||
const endpoint = this.sentinelNatResolve(addressResponseToAddress(sentinel));
|
||||
if (this.sentinelIterator.add(endpoint)) {
|
||||
debug("adding sentinel %s:%s", endpoint.host, endpoint.port);
|
||||
}
|
||||
}
|
||||
});
|
||||
debug("Updated internal sentinels: %s", this.sentinelIterator);
|
||||
}
|
||||
async resolveMaster(client) {
|
||||
const result = await client.sentinel("get-master-addr-by-name", this.options.name);
|
||||
await this.updateSentinels(client);
|
||||
return this.sentinelNatResolve(Array.isArray(result)
|
||||
? { host: result[0], port: Number(result[1]) }
|
||||
: null);
|
||||
}
|
||||
async resolveSlave(client) {
|
||||
const result = await client.sentinel("slaves", this.options.name);
|
||||
if (!Array.isArray(result)) {
|
||||
return null;
|
||||
}
|
||||
const availableSlaves = result
|
||||
.map(utils_1.packObject)
|
||||
.filter((slave) => slave.flags && !slave.flags.match(/(disconnected|s_down|o_down)/));
|
||||
return this.sentinelNatResolve(selectPreferredSentinel(availableSlaves, this.options.preferredSlaves));
|
||||
}
|
||||
sentinelNatResolve(item) {
|
||||
if (!item || !this.options.natMap)
|
||||
return item;
|
||||
const key = `${item.host}:${item.port}`;
|
||||
let result = item;
|
||||
if (typeof this.options.natMap === "function") {
|
||||
result = this.options.natMap(key) || item;
|
||||
}
|
||||
else if (typeof this.options.natMap === "object") {
|
||||
result = this.options.natMap[key] || item;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
connectToSentinel(endpoint, options) {
|
||||
const redis = new Redis_1.default({
|
||||
port: endpoint.port || 26379,
|
||||
host: endpoint.host,
|
||||
username: this.options.sentinelUsername || null,
|
||||
password: this.options.sentinelPassword || null,
|
||||
family: endpoint.family ||
|
||||
// @ts-expect-error
|
||||
("path" in this.options && this.options.path
|
||||
? undefined
|
||||
: // @ts-expect-error
|
||||
this.options.family),
|
||||
tls: this.options.sentinelTLS,
|
||||
retryStrategy: null,
|
||||
enableReadyCheck: false,
|
||||
connectTimeout: this.options.connectTimeout,
|
||||
commandTimeout: this.options.sentinelCommandTimeout,
|
||||
...options,
|
||||
});
|
||||
// @ts-expect-error
|
||||
return redis;
|
||||
}
|
||||
async resolve(endpoint) {
|
||||
const client = this.connectToSentinel(endpoint);
|
||||
// ignore the errors since resolve* methods will handle them
|
||||
client.on("error", noop);
|
||||
try {
|
||||
if (this.options.role === "slave") {
|
||||
return await this.resolveSlave(client);
|
||||
}
|
||||
else {
|
||||
return await this.resolveMaster(client);
|
||||
}
|
||||
}
|
||||
finally {
|
||||
client.disconnect();
|
||||
}
|
||||
}
|
||||
async initFailoverDetector() {
|
||||
var _a;
|
||||
if (!this.options.failoverDetector) {
|
||||
return;
|
||||
}
|
||||
// Move the current sentinel to the first position
|
||||
this.sentinelIterator.reset(true);
|
||||
const sentinels = [];
|
||||
// In case of a large amount of sentinels, limit the number of concurrent connections
|
||||
while (sentinels.length < this.options.sentinelMaxConnections) {
|
||||
const { done, value } = this.sentinelIterator.next();
|
||||
if (done) {
|
||||
break;
|
||||
}
|
||||
const client = this.connectToSentinel(value, {
|
||||
lazyConnect: true,
|
||||
retryStrategy: this.options.sentinelReconnectStrategy,
|
||||
});
|
||||
client.on("reconnecting", () => {
|
||||
var _a;
|
||||
// Tests listen to this event
|
||||
(_a = this.emitter) === null || _a === void 0 ? void 0 : _a.emit("sentinelReconnecting");
|
||||
});
|
||||
sentinels.push({ address: value, client });
|
||||
}
|
||||
this.sentinelIterator.reset(false);
|
||||
if (this.failoverDetector) {
|
||||
// Clean up previous detector
|
||||
this.failoverDetector.cleanup();
|
||||
}
|
||||
this.failoverDetector = new FailoverDetector_1.FailoverDetector(this, sentinels);
|
||||
await this.failoverDetector.subscribe();
|
||||
// Tests listen to this event
|
||||
(_a = this.emitter) === null || _a === void 0 ? void 0 : _a.emit("failoverSubscribed");
|
||||
}
|
||||
}
|
||||
exports.default = SentinelConnector;
|
||||
function selectPreferredSentinel(availableSlaves, preferredSlaves) {
|
||||
if (availableSlaves.length === 0) {
|
||||
return null;
|
||||
}
|
||||
let selectedSlave;
|
||||
if (typeof preferredSlaves === "function") {
|
||||
selectedSlave = preferredSlaves(availableSlaves);
|
||||
}
|
||||
else if (preferredSlaves !== null && typeof preferredSlaves === "object") {
|
||||
const preferredSlavesArray = Array.isArray(preferredSlaves)
|
||||
? preferredSlaves
|
||||
: [preferredSlaves];
|
||||
// sort by priority
|
||||
preferredSlavesArray.sort((a, b) => {
|
||||
// default the priority to 1
|
||||
if (!a.prio) {
|
||||
a.prio = 1;
|
||||
}
|
||||
if (!b.prio) {
|
||||
b.prio = 1;
|
||||
}
|
||||
// lowest priority first
|
||||
if (a.prio < b.prio) {
|
||||
return -1;
|
||||
}
|
||||
if (a.prio > b.prio) {
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
});
|
||||
// loop over preferred slaves and return the first match
|
||||
for (let p = 0; p < preferredSlavesArray.length; p++) {
|
||||
for (let a = 0; a < availableSlaves.length; a++) {
|
||||
const slave = availableSlaves[a];
|
||||
if (slave.ip === preferredSlavesArray[p].ip) {
|
||||
if (slave.port === preferredSlavesArray[p].port) {
|
||||
selectedSlave = slave;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (selectedSlave) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
// if none of the preferred slaves are available, a random available slave is returned
|
||||
if (!selectedSlave) {
|
||||
selectedSlave = (0, utils_1.sample)(availableSlaves);
|
||||
}
|
||||
return addressResponseToAddress(selectedSlave);
|
||||
}
|
||||
function addressResponseToAddress(input) {
|
||||
return { host: input.ip, port: Number(input.port) };
|
||||
}
|
||||
function noop() { }
|
||||
21
node_modules/ioredis/built/connectors/SentinelConnector/types.d.ts
generated
vendored
Normal file
21
node_modules/ioredis/built/connectors/SentinelConnector/types.d.ts
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
import { RedisOptions } from "../../redis/RedisOptions";
|
||||
export interface SentinelAddress {
|
||||
port: number;
|
||||
host: string;
|
||||
family?: number;
|
||||
}
|
||||
export interface RedisClient {
|
||||
options: RedisOptions;
|
||||
sentinel(subcommand: "sentinels", name: string): Promise<string[]>;
|
||||
sentinel(subcommand: "get-master-addr-by-name", name: string): Promise<string[]>;
|
||||
sentinel(subcommand: "slaves", name: string): Promise<string[]>;
|
||||
subscribe(...channelNames: string[]): Promise<number>;
|
||||
on(event: "message", callback: (channel: string, message: string) => void): void;
|
||||
on(event: "error", callback: (error: Error) => void): void;
|
||||
on(event: "reconnecting", callback: () => void): void;
|
||||
disconnect(): void;
|
||||
}
|
||||
export interface Sentinel {
|
||||
address: Partial<SentinelAddress>;
|
||||
client: RedisClient;
|
||||
}
|
||||
2
node_modules/ioredis/built/connectors/SentinelConnector/types.js
generated
vendored
Normal file
2
node_modules/ioredis/built/connectors/SentinelConnector/types.js
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
17
node_modules/ioredis/built/connectors/StandaloneConnector.d.ts
generated
vendored
Normal file
17
node_modules/ioredis/built/connectors/StandaloneConnector.d.ts
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
/// <reference types="node" />
|
||||
import { IpcNetConnectOpts, TcpNetConnectOpts } from "net";
|
||||
import { ConnectionOptions } from "tls";
|
||||
import { NetStream } from "../types";
|
||||
import AbstractConnector, { ErrorEmitter } from "./AbstractConnector";
|
||||
declare type TcpOptions = Pick<TcpNetConnectOpts, "port" | "host" | "family">;
|
||||
declare type IpcOptions = Pick<IpcNetConnectOpts, "path">;
|
||||
export declare type StandaloneConnectionOptions = Partial<TcpOptions & IpcOptions> & {
|
||||
disconnectTimeout?: number | undefined;
|
||||
tls?: ConnectionOptions | undefined;
|
||||
};
|
||||
export default class StandaloneConnector extends AbstractConnector {
|
||||
protected options: StandaloneConnectionOptions;
|
||||
constructor(options: StandaloneConnectionOptions);
|
||||
connect(_: ErrorEmitter): Promise<NetStream>;
|
||||
}
|
||||
export {};
|
||||
69
node_modules/ioredis/built/connectors/StandaloneConnector.js
generated
vendored
Normal file
69
node_modules/ioredis/built/connectors/StandaloneConnector.js
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const net_1 = require("net");
|
||||
const tls_1 = require("tls");
|
||||
const utils_1 = require("../utils");
|
||||
const AbstractConnector_1 = require("./AbstractConnector");
|
||||
class StandaloneConnector extends AbstractConnector_1.default {
|
||||
constructor(options) {
|
||||
super(options.disconnectTimeout);
|
||||
this.options = options;
|
||||
}
|
||||
connect(_) {
|
||||
const { options } = this;
|
||||
this.connecting = true;
|
||||
let connectionOptions;
|
||||
if ("path" in options && options.path) {
|
||||
connectionOptions = {
|
||||
path: options.path,
|
||||
};
|
||||
}
|
||||
else {
|
||||
connectionOptions = {};
|
||||
if ("port" in options && options.port != null) {
|
||||
connectionOptions.port = options.port;
|
||||
}
|
||||
if ("host" in options && options.host != null) {
|
||||
connectionOptions.host = options.host;
|
||||
}
|
||||
if ("family" in options && options.family != null) {
|
||||
connectionOptions.family = options.family;
|
||||
}
|
||||
}
|
||||
if (options.tls) {
|
||||
Object.assign(connectionOptions, options.tls);
|
||||
}
|
||||
// TODO:
|
||||
// We use native Promise here since other Promise
|
||||
// implementation may use different schedulers that
|
||||
// cause issue when the stream is resolved in the
|
||||
// next tick.
|
||||
// Should use the provided promise in the next major
|
||||
// version and do not connect before resolved.
|
||||
return new Promise((resolve, reject) => {
|
||||
process.nextTick(() => {
|
||||
if (!this.connecting) {
|
||||
reject(new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG));
|
||||
return;
|
||||
}
|
||||
try {
|
||||
if (options.tls) {
|
||||
this.stream = (0, tls_1.connect)(connectionOptions);
|
||||
}
|
||||
else {
|
||||
this.stream = (0, net_1.createConnection)(connectionOptions);
|
||||
}
|
||||
}
|
||||
catch (err) {
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
this.stream.once("error", (err) => {
|
||||
this.firstError = err;
|
||||
});
|
||||
resolve(this.stream);
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.default = StandaloneConnector;
|
||||
3
node_modules/ioredis/built/connectors/index.d.ts
generated
vendored
Normal file
3
node_modules/ioredis/built/connectors/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
import StandaloneConnector from "./StandaloneConnector";
|
||||
import SentinelConnector from "./SentinelConnector";
|
||||
export { StandaloneConnector, SentinelConnector };
|
||||
7
node_modules/ioredis/built/connectors/index.js
generated
vendored
Normal file
7
node_modules/ioredis/built/connectors/index.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.SentinelConnector = exports.StandaloneConnector = void 0;
|
||||
const StandaloneConnector_1 = require("./StandaloneConnector");
|
||||
exports.StandaloneConnector = StandaloneConnector_1.default;
|
||||
const SentinelConnector_1 = require("./SentinelConnector");
|
||||
exports.SentinelConnector = SentinelConnector_1.default;
|
||||
9
node_modules/ioredis/built/constants/TLSProfiles.d.ts
generated
vendored
Normal file
9
node_modules/ioredis/built/constants/TLSProfiles.d.ts
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
149
node_modules/ioredis/built/constants/TLSProfiles.js
generated
vendored
Normal file
149
node_modules/ioredis/built/constants/TLSProfiles.js
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
/**
|
||||
* TLS settings for Redis Cloud. Updated on 2022-08-19.
|
||||
*/
|
||||
const RedisCloudCA = `-----BEGIN CERTIFICATE-----
|
||||
MIIDTzCCAjegAwIBAgIJAKSVpiDswLcwMA0GCSqGSIb3DQEBBQUAMD4xFjAUBgNV
|
||||
BAoMDUdhcmFudGlhIERhdGExJDAiBgNVBAMMG1NTTCBDZXJ0aWZpY2F0aW9uIEF1
|
||||
dGhvcml0eTAeFw0xMzEwMDExMjE0NTVaFw0yMzA5MjkxMjE0NTVaMD4xFjAUBgNV
|
||||
BAoMDUdhcmFudGlhIERhdGExJDAiBgNVBAMMG1NTTCBDZXJ0aWZpY2F0aW9uIEF1
|
||||
dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALZqkh/DczWP
|
||||
JnxnHLQ7QL0T4B4CDKWBKCcisriGbA6ZePWVNo4hfKQC6JrzfR+081NeD6VcWUiz
|
||||
rmd+jtPhIY4c+WVQYm5PKaN6DT1imYdxQw7aqO5j2KUCEh/cznpLxeSHoTxlR34E
|
||||
QwF28Wl3eg2vc5ct8LjU3eozWVk3gb7alx9mSA2SgmuX5lEQawl++rSjsBStemY2
|
||||
BDwOpAMXIrdEyP/cVn8mkvi/BDs5M5G+09j0gfhyCzRWMQ7Hn71u1eolRxwVxgi3
|
||||
TMn+/vTaFSqxKjgck6zuAYjBRPaHe7qLxHNr1So/Mc9nPy+3wHebFwbIcnUojwbp
|
||||
4nctkWbjb2cCAwEAAaNQME4wHQYDVR0OBBYEFP1whtcrydmW3ZJeuSoKZIKjze3w
|
||||
MB8GA1UdIwQYMBaAFP1whtcrydmW3ZJeuSoKZIKjze3wMAwGA1UdEwQFMAMBAf8w
|
||||
DQYJKoZIhvcNAQEFBQADggEBAG2erXhwRAa7+ZOBs0B6X57Hwyd1R4kfmXcs0rta
|
||||
lbPpvgULSiB+TCbf3EbhJnHGyvdCY1tvlffLjdA7HJ0PCOn+YYLBA0pTU/dyvrN6
|
||||
Su8NuS5yubnt9mb13nDGYo1rnt0YRfxN+8DM3fXIVr038A30UlPX2Ou1ExFJT0MZ
|
||||
uFKY6ZvLdI6/1cbgmguMlAhM+DhKyV6Sr5699LM3zqeI816pZmlREETYkGr91q7k
|
||||
BpXJu/dtHaGxg1ZGu6w/PCsYGUcECWENYD4VQPd8N32JjOfu6vEgoEAwfPP+3oGp
|
||||
Z4m3ewACcWOAenqflb+cQYC4PsF7qbXDmRaWrbKntOlZ3n0=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIGMTCCBBmgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwajELMAkGA1UEBhMCVVMx
|
||||
CzAJBgNVBAgMAkNBMQswCQYDVQQHDAJDQTESMBAGA1UECgwJUmVkaXNMYWJzMS0w
|
||||
KwYDVQQDDCRSZWRpc0xhYnMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN
|
||||
MTgwMjI1MTUzNzM3WhcNMjgwMjIzMTUzNzM3WjBfMQswCQYDVQQGEwJVUzELMAkG
|
||||
A1UECAwCQ0ExEjAQBgNVBAoMCVJlZGlzTGFiczEvMC0GA1UEAwwmUkNQIEludGVy
|
||||
bWVkaWF0ZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
|
||||
A4ICDwAwggIKAoICAQDf9dqbxc8Bq7Ctq9rWcxrGNKKHivqLAFpPq02yLPx6fsOv
|
||||
Tq7GsDChAYBBc4v7Y2Ap9RD5Vs3dIhEANcnolf27QwrG9RMnnvzk8pCvp1o6zSU4
|
||||
VuOE1W66/O1/7e2rVxyrnTcP7UgK43zNIXu7+tiAqWsO92uSnuMoGPGpeaUm1jym
|
||||
hjWKtkAwDFSqvHY+XL5qDVBEjeUe+WHkYUg40cAXjusAqgm2hZt29c2wnVrxW25W
|
||||
P0meNlzHGFdA2AC5z54iRiqj57dTfBTkHoBczQxcyw6hhzxZQ4e5I5zOKjXXEhZN
|
||||
r0tA3YC14CTabKRus/JmZieyZzRgEy2oti64tmLYTqSlAD78pRL40VNoaSYetXLw
|
||||
hhNsXCHgWaY6d5bLOc/aIQMAV5oLvZQKvuXAF1IDmhPA+bZbpWipp0zagf1P1H3s
|
||||
UzsMdn2KM0ejzgotbtNlj5TcrVwpmvE3ktvUAuA+hi3FkVx1US+2Gsp5x4YOzJ7u
|
||||
P1WPk6ShF0JgnJH2ILdj6kttTWwFzH17keSFICWDfH/+kM+k7Y1v3EXMQXE7y0T9
|
||||
MjvJskz6d/nv+sQhY04xt64xFMGTnZjlJMzfQNi7zWFLTZnDD0lPowq7l3YiPoTT
|
||||
t5Xky83lu0KZsZBo0WlWaDG00gLVdtRgVbcuSWxpi5BdLb1kRab66JptWjxwXQID
|
||||
AQABo4HrMIHoMDoGA1UdHwQzMDEwL6AtoCuGKWh0dHBzOi8vcmwtY2Etc2VydmVy
|
||||
LnJlZGlzbGFicy5jb20vdjEvY3JsMEYGCCsGAQUFBwEBBDowODA2BggrBgEFBQcw
|
||||
AYYqaHR0cHM6Ly9ybC1jYS1zZXJ2ZXIucmVkaXNsYWJzLmNvbS92MS9vY3NwMB0G
|
||||
A1UdDgQWBBQHar5OKvQUpP2qWt6mckzToeCOHDAfBgNVHSMEGDAWgBQi42wH6hM4
|
||||
L2sujEvLM0/u8lRXTzASBgNVHRMBAf8ECDAGAQH/AgEAMA4GA1UdDwEB/wQEAwIB
|
||||
hjANBgkqhkiG9w0BAQsFAAOCAgEAirEn/iTsAKyhd+pu2W3Z5NjCko4NPU0EYUbr
|
||||
AP7+POK2rzjIrJO3nFYQ/LLuC7KCXG+2qwan2SAOGmqWst13Y+WHp44Kae0kaChW
|
||||
vcYLXXSoGQGC8QuFSNUdaeg3RbMDYFT04dOkqufeWVccoHVxyTSg9eD8LZuHn5jw
|
||||
7QDLiEECBmIJHk5Eeo2TAZrx4Yx6ufSUX5HeVjlAzqwtAqdt99uCJ/EL8bgpWbe+
|
||||
XoSpvUv0SEC1I1dCAhCKAvRlIOA6VBcmzg5Am12KzkqTul12/VEFIgzqu0Zy2Jbc
|
||||
AUPrYVu/+tOGXQaijy7YgwH8P8n3s7ZeUa1VABJHcxrxYduDDJBLZi+MjheUDaZ1
|
||||
jQRHYevI2tlqeSBqdPKG4zBY5lS0GiAlmuze5oENt0P3XboHoZPHiqcK3VECgTVh
|
||||
/BkJcuudETSJcZDmQ8YfoKfBzRQNg2sv/hwvUv73Ss51Sco8GEt2lD8uEdib1Q6z
|
||||
zDT5lXJowSzOD5ZA9OGDjnSRL+2riNtKWKEqvtEG3VBJoBzu9GoxbAc7wIZLxmli
|
||||
iF5a/Zf5X+UXD3s4TMmy6C4QZJpAA2egsSQCnraWO2ULhh7iXMysSkF/nzVfZn43
|
||||
iqpaB8++9a37hWq14ZmOv0TJIDz//b2+KC4VFXWQ5W5QC6whsjT+OlG4p5ZYG0jo
|
||||
616pxqo=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFujCCA6KgAwIBAgIJAJ1aTT1lu2ScMA0GCSqGSIb3DQEBCwUAMGoxCzAJBgNV
|
||||
BAYTAlVTMQswCQYDVQQIDAJDQTELMAkGA1UEBwwCQ0ExEjAQBgNVBAoMCVJlZGlz
|
||||
TGFiczEtMCsGA1UEAwwkUmVkaXNMYWJzIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9y
|
||||
aXR5MB4XDTE4MDIyNTE1MjA0MloXDTM4MDIyMDE1MjA0MlowajELMAkGA1UEBhMC
|
||||
VVMxCzAJBgNVBAgMAkNBMQswCQYDVQQHDAJDQTESMBAGA1UECgwJUmVkaXNMYWJz
|
||||
MS0wKwYDVQQDDCRSZWRpc0xhYnMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkw
|
||||
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDLEjXy7YrbN5Waau5cd6g1
|
||||
G5C2tMmeTpZ0duFAPxNU4oE3RHS5gGiok346fUXuUxbZ6QkuzeN2/2Z+RmRcJhQY
|
||||
Dm0ZgdG4x59An1TJfnzKKoWj8ISmoHS/TGNBdFzXV7FYNLBuqZouqePI6ReC6Qhl
|
||||
pp45huV32Q3a6IDrrvx7Wo5ZczEQeFNbCeCOQYNDdTmCyEkHqc2AGo8eoIlSTutT
|
||||
ULOC7R5gzJVTS0e1hesQ7jmqHjbO+VQS1NAL4/5K6cuTEqUl+XhVhPdLWBXJQ5ag
|
||||
54qhX4v+ojLzeU1R/Vc6NjMvVtptWY6JihpgplprN0Yh2556ewcXMeturcKgXfGJ
|
||||
xeYzsjzXerEjrVocX5V8BNrg64NlifzTMKNOOv4fVZszq1SIHR8F9ROrqiOdh8iC
|
||||
JpUbLpXH9hWCSEO6VRMB2xJoKu3cgl63kF30s77x7wLFMEHiwsQRKxooE1UhgS9K
|
||||
2sO4TlQ1eWUvFvHSTVDQDlGQ6zu4qjbOpb3Q8bQwoK+ai2alkXVR4Ltxe9QlgYK3
|
||||
StsnPhruzZGA0wbXdpw0bnM+YdlEm5ffSTpNIfgHeaa7Dtb801FtA71ZlH7A6TaI
|
||||
SIQuUST9EKmv7xrJyx0W1pGoPOLw5T029aTjnICSLdtV9bLwysrLhIYG5bnPq78B
|
||||
cS+jZHFGzD7PUVGQD01nOQIDAQABo2MwYTAdBgNVHQ4EFgQUIuNsB+oTOC9rLoxL
|
||||
yzNP7vJUV08wHwYDVR0jBBgwFoAUIuNsB+oTOC9rLoxLyzNP7vJUV08wDwYDVR0T
|
||||
AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAHfg
|
||||
z5pMNUAKdMzK1aS1EDdK9yKz4qicILz5czSLj1mC7HKDRy8cVADUxEICis++CsCu
|
||||
rYOvyCVergHQLREcxPq4rc5Nq1uj6J6649NEeh4WazOOjL4ZfQ1jVznMbGy+fJm3
|
||||
3Hoelv6jWRG9iqeJZja7/1s6YC6bWymI/OY1e4wUKeNHAo+Vger7MlHV+RuabaX+
|
||||
hSJ8bJAM59NCM7AgMTQpJCncrcdLeceYniGy5Q/qt2b5mJkQVkIdy4TPGGB+AXDJ
|
||||
D0q3I/JDRkDUFNFdeW0js7fHdsvCR7O3tJy5zIgEV/o/BCkmJVtuwPYOrw/yOlKj
|
||||
TY/U7ATAx9VFF6/vYEOMYSmrZlFX+98L6nJtwDqfLB5VTltqZ4H/KBxGE3IRSt9l
|
||||
FXy40U+LnXzhhW+7VBAvyYX8GEXhHkKU8Gqk1xitrqfBXY74xKgyUSTolFSfFVgj
|
||||
mcM/X4K45bka+qpkj7Kfv/8D4j6aZekwhN2ly6hhC1SmQ8qjMjpG/mrWOSSHZFmf
|
||||
ybu9iD2AYHeIOkshIl6xYIa++Q/00/vs46IzAbQyriOi0XxlSMMVtPx0Q3isp+ji
|
||||
n8Mq9eOuxYOEQ4of8twUkUDd528iwGtEdwf0Q01UyT84S62N8AySl1ZBKXJz6W4F
|
||||
UhWfa/HQYOAPDdEjNgnVwLI23b8t0TozyCWw7q8h
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEjzCCA3egAwIBAgIQe55B/ALCKJDZtdNT8kD6hTANBgkqhkiG9w0BAQsFADBM
|
||||
MSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xv
|
||||
YmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjAeFw0yMjAxMjYxMjAwMDBaFw0y
|
||||
NTAxMjYwMDAwMDBaMFgxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWdu
|
||||
IG52LXNhMS4wLAYDVQQDEyVHbG9iYWxTaWduIEF0bGFzIFIzIE9WIFRMUyBDQSAy
|
||||
MDIyIFEyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmGmg1LW9b7Lf
|
||||
8zDD83yBDTEkt+FOxKJZqF4veWc5KZsQj9HfnUS2e5nj/E+JImlGPsQuoiosLuXD
|
||||
BVBNAMcUFa11buFMGMeEMwiTmCXoXRrXQmH0qjpOfKgYc5gHG3BsRGaRrf7VR4eg
|
||||
ofNMG9wUBw4/g/TT7+bQJdA4NfE7Y4d5gEryZiBGB/swaX6Jp/8MF4TgUmOWmalK
|
||||
dZCKyb4sPGQFRTtElk67F7vU+wdGcrcOx1tDcIB0ncjLPMnaFicagl+daWGsKqTh
|
||||
counQb6QJtYHa91KvCfKWocMxQ7OIbB5UARLPmC4CJ1/f8YFm35ebfzAeULYdGXu
|
||||
jE9CLor0OwIDAQABo4IBXzCCAVswDgYDVR0PAQH/BAQDAgGGMB0GA1UdJQQWMBQG
|
||||
CCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQW
|
||||
BBSH5Zq7a7B/t95GfJWkDBpA8HHqdjAfBgNVHSMEGDAWgBSP8Et/qC5FJK5NUPpj
|
||||
move4t0bvDB7BggrBgEFBQcBAQRvMG0wLgYIKwYBBQUHMAGGImh0dHA6Ly9vY3Nw
|
||||
Mi5nbG9iYWxzaWduLmNvbS9yb290cjMwOwYIKwYBBQUHMAKGL2h0dHA6Ly9zZWN1
|
||||
cmUuZ2xvYmFsc2lnbi5jb20vY2FjZXJ0L3Jvb3QtcjMuY3J0MDYGA1UdHwQvMC0w
|
||||
K6ApoCeGJWh0dHA6Ly9jcmwuZ2xvYmFsc2lnbi5jb20vcm9vdC1yMy5jcmwwIQYD
|
||||
VR0gBBowGDAIBgZngQwBAgIwDAYKKwYBBAGgMgoBAjANBgkqhkiG9w0BAQsFAAOC
|
||||
AQEAKRic9/f+nmhQU/wz04APZLjgG5OgsuUOyUEZjKVhNGDwxGTvKhyXGGAMW2B/
|
||||
3bRi+aElpXwoxu3pL6fkElbX3B0BeS5LoDtxkyiVEBMZ8m+sXbocwlPyxrPbX6mY
|
||||
0rVIvnuUeBH8X0L5IwfpNVvKnBIilTbcebfHyXkPezGwz7E1yhUULjJFm2bt0SdX
|
||||
y+4X/WeiiYIv+fTVgZZgl+/2MKIsu/qdBJc3f3TvJ8nz+Eax1zgZmww+RSQWeOj3
|
||||
15Iw6Z5FX+NwzY/Ab+9PosR5UosSeq+9HhtaxZttXG1nVh+avYPGYddWmiMT90J5
|
||||
ZgKnO/Fx2hBgTxhOTMYaD312kg==
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
|
||||
A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
|
||||
Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
|
||||
MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
|
||||
A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
|
||||
hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
|
||||
RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
|
||||
gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
|
||||
KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
|
||||
QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
|
||||
XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
|
||||
DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
|
||||
LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
|
||||
RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
|
||||
jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
|
||||
6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
|
||||
mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
|
||||
Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
|
||||
WD9f
|
||||
-----END CERTIFICATE-----`;
|
||||
const TLSProfiles = {
|
||||
RedisCloudFixed: { ca: RedisCloudCA },
|
||||
RedisCloudFlexible: { ca: RedisCloudCA },
|
||||
};
|
||||
exports.default = TLSProfiles;
|
||||
7
node_modules/ioredis/built/errors/ClusterAllFailedError.d.ts
generated
vendored
Normal file
7
node_modules/ioredis/built/errors/ClusterAllFailedError.d.ts
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
import { RedisError } from "redis-errors";
|
||||
export default class ClusterAllFailedError extends RedisError {
|
||||
lastNodeError: RedisError;
|
||||
static defaultMessage: string;
|
||||
constructor(message: any, lastNodeError: RedisError);
|
||||
get name(): string;
|
||||
}
|
||||
15
node_modules/ioredis/built/errors/ClusterAllFailedError.js
generated
vendored
Normal file
15
node_modules/ioredis/built/errors/ClusterAllFailedError.js
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const redis_errors_1 = require("redis-errors");
|
||||
class ClusterAllFailedError extends redis_errors_1.RedisError {
|
||||
constructor(message, lastNodeError) {
|
||||
super(message);
|
||||
this.lastNodeError = lastNodeError;
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
}
|
||||
get name() {
|
||||
return this.constructor.name;
|
||||
}
|
||||
}
|
||||
exports.default = ClusterAllFailedError;
|
||||
ClusterAllFailedError.defaultMessage = "Failed to refresh slots cache.";
|
||||
5
node_modules/ioredis/built/errors/MaxRetriesPerRequestError.d.ts
generated
vendored
Normal file
5
node_modules/ioredis/built/errors/MaxRetriesPerRequestError.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
import { AbortError } from "redis-errors";
|
||||
export default class MaxRetriesPerRequestError extends AbortError {
|
||||
constructor(maxRetriesPerRequest: number);
|
||||
get name(): string;
|
||||
}
|
||||
14
node_modules/ioredis/built/errors/MaxRetriesPerRequestError.js
generated
vendored
Normal file
14
node_modules/ioredis/built/errors/MaxRetriesPerRequestError.js
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const redis_errors_1 = require("redis-errors");
|
||||
class MaxRetriesPerRequestError extends redis_errors_1.AbortError {
|
||||
constructor(maxRetriesPerRequest) {
|
||||
const message = `Reached the max retries per request limit (which is ${maxRetriesPerRequest}). Refer to "maxRetriesPerRequest" option for details.`;
|
||||
super(message);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
}
|
||||
get name() {
|
||||
return this.constructor.name;
|
||||
}
|
||||
}
|
||||
exports.default = MaxRetriesPerRequestError;
|
||||
2
node_modules/ioredis/built/errors/index.d.ts
generated
vendored
Normal file
2
node_modules/ioredis/built/errors/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
import MaxRetriesPerRequestError from "./MaxRetriesPerRequestError";
|
||||
export { MaxRetriesPerRequestError };
|
||||
5
node_modules/ioredis/built/errors/index.js
generated
vendored
Normal file
5
node_modules/ioredis/built/errors/index.js
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.MaxRetriesPerRequestError = void 0;
|
||||
const MaxRetriesPerRequestError_1 = require("./MaxRetriesPerRequestError");
|
||||
exports.MaxRetriesPerRequestError = MaxRetriesPerRequestError_1.default;
|
||||
43
node_modules/ioredis/built/index.d.ts
generated
vendored
Normal file
43
node_modules/ioredis/built/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
export { default } from "./Redis";
|
||||
export { default as Redis } from "./Redis";
|
||||
export { default as Cluster } from "./cluster";
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
export { default as Command } from "./Command";
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
export { default as RedisCommander, Result, ClientContext, } from "./utils/RedisCommander";
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
export { default as ScanStream } from "./ScanStream";
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
export { default as Pipeline } from "./Pipeline";
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
export { default as AbstractConnector } from "./connectors/AbstractConnector";
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
export { default as SentinelConnector, SentinelIterator, } from "./connectors/SentinelConnector";
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
export { Callback } from "./types";
|
||||
export { SentinelAddress, SentinelConnectionOptions, } from "./connectors/SentinelConnector";
|
||||
export { StandaloneConnectionOptions } from "./connectors/StandaloneConnector";
|
||||
export { RedisOptions, CommonRedisOptions } from "./redis/RedisOptions";
|
||||
export { ClusterNode } from "./cluster";
|
||||
export { ClusterOptions, DNSLookupFunction, DNSResolveSrvFunction, NatMap, } from "./cluster/ClusterOptions";
|
||||
export { NodeRole } from "./cluster/util";
|
||||
export type { RedisKey, RedisValue, ChainableCommander, } from "./utils/RedisCommander";
|
||||
export declare const ReplyError: any;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
export declare function print(err: Error | null, reply?: any): void;
|
||||
62
node_modules/ioredis/built/index.js
generated
vendored
Normal file
62
node_modules/ioredis/built/index.js
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.print = exports.ReplyError = exports.SentinelIterator = exports.SentinelConnector = exports.AbstractConnector = exports.Pipeline = exports.ScanStream = exports.Command = exports.Cluster = exports.Redis = exports.default = void 0;
|
||||
exports = module.exports = require("./Redis").default;
|
||||
var Redis_1 = require("./Redis");
|
||||
Object.defineProperty(exports, "default", { enumerable: true, get: function () { return Redis_1.default; } });
|
||||
var Redis_2 = require("./Redis");
|
||||
Object.defineProperty(exports, "Redis", { enumerable: true, get: function () { return Redis_2.default; } });
|
||||
var cluster_1 = require("./cluster");
|
||||
Object.defineProperty(exports, "Cluster", { enumerable: true, get: function () { return cluster_1.default; } });
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
var Command_1 = require("./Command");
|
||||
Object.defineProperty(exports, "Command", { enumerable: true, get: function () { return Command_1.default; } });
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
var ScanStream_1 = require("./ScanStream");
|
||||
Object.defineProperty(exports, "ScanStream", { enumerable: true, get: function () { return ScanStream_1.default; } });
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
var Pipeline_1 = require("./Pipeline");
|
||||
Object.defineProperty(exports, "Pipeline", { enumerable: true, get: function () { return Pipeline_1.default; } });
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
var AbstractConnector_1 = require("./connectors/AbstractConnector");
|
||||
Object.defineProperty(exports, "AbstractConnector", { enumerable: true, get: function () { return AbstractConnector_1.default; } });
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
var SentinelConnector_1 = require("./connectors/SentinelConnector");
|
||||
Object.defineProperty(exports, "SentinelConnector", { enumerable: true, get: function () { return SentinelConnector_1.default; } });
|
||||
Object.defineProperty(exports, "SentinelIterator", { enumerable: true, get: function () { return SentinelConnector_1.SentinelIterator; } });
|
||||
// No TS typings
|
||||
exports.ReplyError = require("redis-errors").ReplyError;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
Object.defineProperty(exports, "Promise", {
|
||||
get() {
|
||||
console.warn("ioredis v5 does not support plugging third-party Promise library anymore. Native Promise will be used.");
|
||||
return Promise;
|
||||
},
|
||||
set(_lib) {
|
||||
console.warn("ioredis v5 does not support plugging third-party Promise library anymore. Native Promise will be used.");
|
||||
},
|
||||
});
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
function print(err, reply) {
|
||||
if (err) {
|
||||
console.log("Error: " + err);
|
||||
}
|
||||
else {
|
||||
console.log("Reply: " + reply);
|
||||
}
|
||||
}
|
||||
exports.print = print;
|
||||
197
node_modules/ioredis/built/redis/RedisOptions.d.ts
generated
vendored
Normal file
197
node_modules/ioredis/built/redis/RedisOptions.d.ts
generated
vendored
Normal file
@@ -0,0 +1,197 @@
|
||||
import { CommanderOptions } from "../utils/Commander";
|
||||
import ConnectorConstructor from "../connectors/ConnectorConstructor";
|
||||
import { SentinelConnectionOptions } from "../connectors/SentinelConnector";
|
||||
import { StandaloneConnectionOptions } from "../connectors/StandaloneConnector";
|
||||
export declare type ReconnectOnError = (err: Error) => boolean | 1 | 2;
|
||||
export interface CommonRedisOptions extends CommanderOptions {
|
||||
Connector?: ConnectorConstructor | undefined;
|
||||
retryStrategy?: ((times: number) => number | void | null) | undefined;
|
||||
/**
|
||||
* If a command does not return a reply within a set number of milliseconds,
|
||||
* a "Command timed out" error will be thrown.
|
||||
*/
|
||||
commandTimeout?: number | undefined;
|
||||
/**
|
||||
* Enables client-side timeout protection for blocking commands when set
|
||||
* to a positive number. If `blockingTimeout` is undefined, `0`, or
|
||||
* negative (e.g. `-1`), the protection is disabled and no client-side
|
||||
* timers are installed for blocking commands.
|
||||
*/
|
||||
blockingTimeout?: number | undefined;
|
||||
/**
|
||||
* Grace period (ms) added to blocking command timeouts. Only used when
|
||||
* `blockingTimeout` is a positive number. Defaults to 100ms.
|
||||
*/
|
||||
blockingTimeoutGrace?: number | undefined;
|
||||
/**
|
||||
* If the socket does not receive data within a set number of milliseconds:
|
||||
* 1. the socket is considered "dead" and will be destroyed
|
||||
* 2. the client will reject any running commands (altought they might have been processed by the server)
|
||||
* 3. the reconnect strategy will kick in (depending on the configuration)
|
||||
*/
|
||||
socketTimeout?: number | undefined;
|
||||
/**
|
||||
* Enable/disable keep-alive functionality.
|
||||
* @link https://nodejs.org/api/net.html#socketsetkeepaliveenable-initialdelay
|
||||
* @default 0
|
||||
*/
|
||||
keepAlive?: number | undefined;
|
||||
/**
|
||||
* Enable/disable the use of Nagle's algorithm.
|
||||
* @link https://nodejs.org/api/net.html#socketsetnodelaynodelay
|
||||
* @default true
|
||||
*/
|
||||
noDelay?: boolean | undefined;
|
||||
/**
|
||||
* Set the name of the connection to make it easier to identity the connection
|
||||
* in client list.
|
||||
* @link https://redis.io/commands/client-setname
|
||||
*/
|
||||
connectionName?: string | undefined;
|
||||
/**
|
||||
* If true, skips setting library info via CLIENT SETINFO.
|
||||
* @link https://redis.io/docs/latest/commands/client-setinfo/
|
||||
* @default false
|
||||
*/
|
||||
disableClientInfo?: boolean | undefined;
|
||||
/**
|
||||
* Tag to append to the library name in CLIENT SETINFO (ioredis(tag)).
|
||||
* @link https://redis.io/docs/latest/commands/client-setinfo/
|
||||
* @default undefined
|
||||
*/
|
||||
clientInfoTag?: string | undefined;
|
||||
/**
|
||||
* If set, client will send AUTH command with the value of this option as the first argument when connected.
|
||||
* This is supported since Redis 6.
|
||||
*/
|
||||
username?: string | undefined;
|
||||
/**
|
||||
* If set, client will send AUTH command with the value of this option when connected.
|
||||
*/
|
||||
password?: string | undefined;
|
||||
/**
|
||||
* Database index to use.
|
||||
*
|
||||
* @default 0
|
||||
*/
|
||||
db?: number | undefined;
|
||||
/**
|
||||
* When the client reconnects, channels subscribed in the previous connection will be
|
||||
* resubscribed automatically if `autoResubscribe` is `true`.
|
||||
* @default true
|
||||
*/
|
||||
autoResubscribe?: boolean | undefined;
|
||||
/**
|
||||
* Whether or not to resend unfulfilled commands on reconnect.
|
||||
* Unfulfilled commands are most likely to be blocking commands such as `brpop` or `blpop`.
|
||||
* @default true
|
||||
*/
|
||||
autoResendUnfulfilledCommands?: boolean | undefined;
|
||||
/**
|
||||
* Whether or not to reconnect on certain Redis errors.
|
||||
* This options by default is `null`, which means it should never reconnect on Redis errors.
|
||||
* You can pass a function that accepts an Redis error, and returns:
|
||||
* - `true` or `1` to trigger a reconnection.
|
||||
* - `false` or `0` to not reconnect.
|
||||
* - `2` to reconnect and resend the failed command (who triggered the error) after reconnection.
|
||||
* @example
|
||||
* ```js
|
||||
* const redis = new Redis({
|
||||
* reconnectOnError(err) {
|
||||
* const targetError = "READONLY";
|
||||
* if (err.message.includes(targetError)) {
|
||||
* // Only reconnect when the error contains "READONLY"
|
||||
* return true; // or `return 1;`
|
||||
* }
|
||||
* },
|
||||
* });
|
||||
* ```
|
||||
* @default null
|
||||
*/
|
||||
reconnectOnError?: ReconnectOnError | null | undefined;
|
||||
/**
|
||||
* @default false
|
||||
*/
|
||||
readOnly?: boolean | undefined;
|
||||
/**
|
||||
* When enabled, numbers returned by Redis will be converted to JavaScript strings instead of numbers.
|
||||
* This is necessary if you want to handle big numbers (above `Number.MAX_SAFE_INTEGER` === 2^53).
|
||||
* @default false
|
||||
*/
|
||||
stringNumbers?: boolean | undefined;
|
||||
/**
|
||||
* How long the client will wait before killing a socket due to inactivity during initial connection.
|
||||
* @default 10000
|
||||
*/
|
||||
connectTimeout?: number | undefined;
|
||||
/**
|
||||
* This option is used internally when you call `redis.monitor()` to tell Redis
|
||||
* to enter the monitor mode when the connection is established.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
monitor?: boolean | undefined;
|
||||
/**
|
||||
* The commands that don't get a reply due to the connection to the server is lost are
|
||||
* put into a queue and will be resent on reconnect (if allowed by the `retryStrategy` option).
|
||||
* This option is used to configure how many reconnection attempts should be allowed before
|
||||
* the queue is flushed with a `MaxRetriesPerRequestError` error.
|
||||
* Set this options to `null` instead of a number to let commands wait forever
|
||||
* until the connection is alive again.
|
||||
*
|
||||
* @default 20
|
||||
*/
|
||||
maxRetriesPerRequest?: number | null | undefined;
|
||||
/**
|
||||
* @default 10000
|
||||
*/
|
||||
maxLoadingRetryTime?: number | undefined;
|
||||
/**
|
||||
* @default false
|
||||
*/
|
||||
enableAutoPipelining?: boolean | undefined;
|
||||
/**
|
||||
* @default []
|
||||
*/
|
||||
autoPipeliningIgnoredCommands?: string[] | undefined;
|
||||
offlineQueue?: boolean | undefined;
|
||||
commandQueue?: boolean | undefined;
|
||||
/**
|
||||
*
|
||||
* By default, if the connection to Redis server has not been established, commands are added to a queue
|
||||
* and are executed once the connection is "ready" (when `enableReadyCheck` is true, "ready" means
|
||||
* the Redis server has loaded the database from disk, otherwise means the connection to the Redis
|
||||
* server has been established). If this option is false, when execute the command when the connection
|
||||
* isn't ready, an error will be returned.
|
||||
*
|
||||
* @default true
|
||||
*/
|
||||
enableOfflineQueue?: boolean | undefined;
|
||||
/**
|
||||
* The client will sent an INFO command to check whether the server is still loading data from the disk (
|
||||
* which happens when the server is just launched) when the connection is established, and only wait until
|
||||
* the loading process is finished before emitting the `ready` event.
|
||||
*
|
||||
* @default true
|
||||
*/
|
||||
enableReadyCheck?: boolean | undefined;
|
||||
/**
|
||||
* When a Redis instance is initialized, a connection to the server is immediately established. Set this to
|
||||
* true will delay the connection to the server until the first command is sent or `redis.connect()` is called
|
||||
* explicitly. When `redis.connect()` is called explicitly, a Promise is returned, which will be resolved
|
||||
* when the connection is ready or rejected when it fails. The rejection should be handled by the user.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
lazyConnect?: boolean | undefined;
|
||||
/**
|
||||
* @default undefined
|
||||
*/
|
||||
scripts?: Record<string, {
|
||||
lua: string;
|
||||
numberOfKeys?: number | undefined;
|
||||
readOnly?: boolean | undefined;
|
||||
}> | undefined;
|
||||
}
|
||||
export declare type RedisOptions = CommonRedisOptions & SentinelConnectionOptions & StandaloneConnectionOptions;
|
||||
export declare const DEFAULT_REDIS_OPTIONS: RedisOptions;
|
||||
58
node_modules/ioredis/built/redis/RedisOptions.js
generated
vendored
Normal file
58
node_modules/ioredis/built/redis/RedisOptions.js
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.DEFAULT_REDIS_OPTIONS = void 0;
|
||||
exports.DEFAULT_REDIS_OPTIONS = {
|
||||
// Connection
|
||||
port: 6379,
|
||||
host: "localhost",
|
||||
family: 0,
|
||||
connectTimeout: 10000,
|
||||
disconnectTimeout: 2000,
|
||||
retryStrategy: function (times) {
|
||||
return Math.min(times * 50, 2000);
|
||||
},
|
||||
keepAlive: 0,
|
||||
noDelay: true,
|
||||
connectionName: null,
|
||||
disableClientInfo: false,
|
||||
clientInfoTag: undefined,
|
||||
// Sentinel
|
||||
sentinels: null,
|
||||
name: null,
|
||||
role: "master",
|
||||
sentinelRetryStrategy: function (times) {
|
||||
return Math.min(times * 10, 1000);
|
||||
},
|
||||
sentinelReconnectStrategy: function () {
|
||||
// This strategy only applies when sentinels are used for detecting
|
||||
// a failover, not during initial master resolution.
|
||||
// The deployment can still function when some of the sentinels are down
|
||||
// for a long period of time, so we may not want to attempt reconnection
|
||||
// very often. Therefore the default interval is fairly long (1 minute).
|
||||
return 60000;
|
||||
},
|
||||
natMap: null,
|
||||
enableTLSForSentinelMode: false,
|
||||
updateSentinels: true,
|
||||
failoverDetector: false,
|
||||
// Status
|
||||
username: null,
|
||||
password: null,
|
||||
db: 0,
|
||||
// Others
|
||||
enableOfflineQueue: true,
|
||||
enableReadyCheck: true,
|
||||
autoResubscribe: true,
|
||||
autoResendUnfulfilledCommands: true,
|
||||
lazyConnect: false,
|
||||
keyPrefix: "",
|
||||
reconnectOnError: null,
|
||||
readOnly: false,
|
||||
stringNumbers: false,
|
||||
maxRetriesPerRequest: 20,
|
||||
maxLoadingRetryTime: 10000,
|
||||
enableAutoPipelining: false,
|
||||
autoPipeliningIgnoredCommands: [],
|
||||
sentinelMaxConnections: 10,
|
||||
blockingTimeoutGrace: 100,
|
||||
};
|
||||
4
node_modules/ioredis/built/redis/event_handler.d.ts
generated
vendored
Normal file
4
node_modules/ioredis/built/redis/event_handler.d.ts
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
export declare function connectHandler(self: any): () => void;
|
||||
export declare function closeHandler(self: any): () => void;
|
||||
export declare function errorHandler(self: any): (error: any) => void;
|
||||
export declare function readyHandler(self: any): () => void;
|
||||
315
node_modules/ioredis/built/redis/event_handler.js
generated
vendored
Normal file
315
node_modules/ioredis/built/redis/event_handler.js
generated
vendored
Normal file
@@ -0,0 +1,315 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.readyHandler = exports.errorHandler = exports.closeHandler = exports.connectHandler = void 0;
|
||||
const redis_errors_1 = require("redis-errors");
|
||||
const Command_1 = require("../Command");
|
||||
const errors_1 = require("../errors");
|
||||
const utils_1 = require("../utils");
|
||||
const DataHandler_1 = require("../DataHandler");
|
||||
const debug = (0, utils_1.Debug)("connection");
|
||||
function connectHandler(self) {
|
||||
return function () {
|
||||
var _a;
|
||||
self.setStatus("connect");
|
||||
self.resetCommandQueue();
|
||||
// AUTH command should be processed before any other commands
|
||||
let flushed = false;
|
||||
const { connectionEpoch } = self;
|
||||
if (self.condition.auth) {
|
||||
self.auth(self.condition.auth, function (err) {
|
||||
if (connectionEpoch !== self.connectionEpoch) {
|
||||
return;
|
||||
}
|
||||
if (err) {
|
||||
if (err.message.indexOf("no password is set") !== -1) {
|
||||
console.warn("[WARN] Redis server does not require a password, but a password was supplied.");
|
||||
}
|
||||
else if (err.message.indexOf("without any password configured for the default user") !== -1) {
|
||||
console.warn("[WARN] This Redis server's `default` user does not require a password, but a password was supplied");
|
||||
}
|
||||
else if (err.message.indexOf("wrong number of arguments for 'auth' command") !== -1) {
|
||||
console.warn(`[ERROR] The server returned "wrong number of arguments for 'auth' command". You are probably passing both username and password to Redis version 5 or below. You should only pass the 'password' option for Redis version 5 and under.`);
|
||||
}
|
||||
else {
|
||||
flushed = true;
|
||||
self.recoverFromFatalError(err, err);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
if (self.condition.select) {
|
||||
self.select(self.condition.select).catch((err) => {
|
||||
// If the node is in cluster mode, select is disallowed.
|
||||
// In this case, reconnect won't help.
|
||||
self.silentEmit("error", err);
|
||||
});
|
||||
}
|
||||
/*
|
||||
No need to keep the reference of DataHandler here
|
||||
because we don't need to do the cleanup.
|
||||
`Stream#end()` will remove all listeners for us.
|
||||
*/
|
||||
new DataHandler_1.default(self, {
|
||||
stringNumbers: self.options.stringNumbers,
|
||||
});
|
||||
const clientCommandPromises = [];
|
||||
if (self.options.connectionName) {
|
||||
debug("set the connection name [%s]", self.options.connectionName);
|
||||
clientCommandPromises.push(self.client("setname", self.options.connectionName).catch(utils_1.noop));
|
||||
}
|
||||
if (!self.options.disableClientInfo) {
|
||||
debug("set the client info");
|
||||
clientCommandPromises.push((0, utils_1.getPackageMeta)()
|
||||
.then((packageMeta) => {
|
||||
return self
|
||||
.client("SETINFO", "LIB-VER", packageMeta.version)
|
||||
.catch(utils_1.noop);
|
||||
})
|
||||
.catch(utils_1.noop));
|
||||
clientCommandPromises.push(self
|
||||
.client("SETINFO", "LIB-NAME", ((_a = self.options) === null || _a === void 0 ? void 0 : _a.clientInfoTag)
|
||||
? `ioredis(${self.options.clientInfoTag})`
|
||||
: "ioredis")
|
||||
.catch(utils_1.noop));
|
||||
}
|
||||
Promise.all(clientCommandPromises)
|
||||
.catch(utils_1.noop)
|
||||
.finally(() => {
|
||||
if (!self.options.enableReadyCheck) {
|
||||
exports.readyHandler(self)();
|
||||
}
|
||||
if (self.options.enableReadyCheck) {
|
||||
self._readyCheck(function (err, info) {
|
||||
if (connectionEpoch !== self.connectionEpoch) {
|
||||
return;
|
||||
}
|
||||
if (err) {
|
||||
if (!flushed) {
|
||||
self.recoverFromFatalError(new Error("Ready check failed: " + err.message), err);
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (self.connector.check(info)) {
|
||||
exports.readyHandler(self)();
|
||||
}
|
||||
else {
|
||||
self.disconnect(true);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
};
|
||||
}
|
||||
exports.connectHandler = connectHandler;
|
||||
function abortError(command) {
|
||||
const err = new redis_errors_1.AbortError("Command aborted due to connection close");
|
||||
err.command = {
|
||||
name: command.name,
|
||||
args: command.args,
|
||||
};
|
||||
return err;
|
||||
}
|
||||
// If a contiguous set of pipeline commands starts from index zero then they
|
||||
// can be safely reattempted. If however we have a chain of pipelined commands
|
||||
// starting at index 1 or more it means we received a partial response before
|
||||
// the connection close and those pipelined commands must be aborted. For
|
||||
// example, if the queue looks like this: [2, 3, 4, 0, 1, 2] then after
|
||||
// aborting and purging we'll have a queue that looks like this: [0, 1, 2]
|
||||
function abortIncompletePipelines(commandQueue) {
|
||||
var _a;
|
||||
let expectedIndex = 0;
|
||||
for (let i = 0; i < commandQueue.length;) {
|
||||
const command = (_a = commandQueue.peekAt(i)) === null || _a === void 0 ? void 0 : _a.command;
|
||||
const pipelineIndex = command.pipelineIndex;
|
||||
if (pipelineIndex === undefined || pipelineIndex === 0) {
|
||||
expectedIndex = 0;
|
||||
}
|
||||
if (pipelineIndex !== undefined && pipelineIndex !== expectedIndex++) {
|
||||
commandQueue.remove(i, 1);
|
||||
command.reject(abortError(command));
|
||||
continue;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
}
|
||||
// If only a partial transaction result was received before connection close,
|
||||
// we have to abort any transaction fragments that may have ended up in the
|
||||
// offline queue
|
||||
function abortTransactionFragments(commandQueue) {
|
||||
var _a;
|
||||
for (let i = 0; i < commandQueue.length;) {
|
||||
const command = (_a = commandQueue.peekAt(i)) === null || _a === void 0 ? void 0 : _a.command;
|
||||
if (command.name === "multi") {
|
||||
break;
|
||||
}
|
||||
if (command.name === "exec") {
|
||||
commandQueue.remove(i, 1);
|
||||
command.reject(abortError(command));
|
||||
break;
|
||||
}
|
||||
if (command.inTransaction) {
|
||||
commandQueue.remove(i, 1);
|
||||
command.reject(abortError(command));
|
||||
}
|
||||
else {
|
||||
i++;
|
||||
}
|
||||
}
|
||||
}
|
||||
function closeHandler(self) {
|
||||
return function () {
|
||||
const prevStatus = self.status;
|
||||
self.setStatus("close");
|
||||
if (self.commandQueue.length) {
|
||||
abortIncompletePipelines(self.commandQueue);
|
||||
}
|
||||
if (self.offlineQueue.length) {
|
||||
abortTransactionFragments(self.offlineQueue);
|
||||
}
|
||||
if (prevStatus === "ready") {
|
||||
if (!self.prevCondition) {
|
||||
self.prevCondition = self.condition;
|
||||
}
|
||||
if (self.commandQueue.length) {
|
||||
self.prevCommandQueue = self.commandQueue;
|
||||
}
|
||||
}
|
||||
if (self.manuallyClosing) {
|
||||
self.manuallyClosing = false;
|
||||
debug("skip reconnecting since the connection is manually closed.");
|
||||
return close();
|
||||
}
|
||||
if (typeof self.options.retryStrategy !== "function") {
|
||||
debug("skip reconnecting because `retryStrategy` is not a function");
|
||||
return close();
|
||||
}
|
||||
const retryDelay = self.options.retryStrategy(++self.retryAttempts);
|
||||
if (typeof retryDelay !== "number") {
|
||||
debug("skip reconnecting because `retryStrategy` doesn't return a number");
|
||||
return close();
|
||||
}
|
||||
debug("reconnect in %sms", retryDelay);
|
||||
self.setStatus("reconnecting", retryDelay);
|
||||
self.reconnectTimeout = setTimeout(function () {
|
||||
self.reconnectTimeout = null;
|
||||
self.connect().catch(utils_1.noop);
|
||||
}, retryDelay);
|
||||
const { maxRetriesPerRequest } = self.options;
|
||||
if (typeof maxRetriesPerRequest === "number") {
|
||||
if (maxRetriesPerRequest < 0) {
|
||||
debug("maxRetriesPerRequest is negative, ignoring...");
|
||||
}
|
||||
else {
|
||||
const remainder = self.retryAttempts % (maxRetriesPerRequest + 1);
|
||||
if (remainder === 0) {
|
||||
debug("reach maxRetriesPerRequest limitation, flushing command queue...");
|
||||
self.flushQueue(new errors_1.MaxRetriesPerRequestError(maxRetriesPerRequest));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
function close() {
|
||||
self.setStatus("end");
|
||||
self.flushQueue(new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG));
|
||||
}
|
||||
}
|
||||
exports.closeHandler = closeHandler;
|
||||
function errorHandler(self) {
|
||||
return function (error) {
|
||||
debug("error: %s", error);
|
||||
self.silentEmit("error", error);
|
||||
};
|
||||
}
|
||||
exports.errorHandler = errorHandler;
|
||||
function readyHandler(self) {
|
||||
return function () {
|
||||
self.setStatus("ready");
|
||||
self.retryAttempts = 0;
|
||||
if (self.options.monitor) {
|
||||
self.call("monitor").then(() => self.setStatus("monitoring"), (error) => self.emit("error", error));
|
||||
const { sendCommand } = self;
|
||||
self.sendCommand = function (command) {
|
||||
if (Command_1.default.checkFlag("VALID_IN_MONITOR_MODE", command.name)) {
|
||||
return sendCommand.call(self, command);
|
||||
}
|
||||
command.reject(new Error("Connection is in monitoring mode, can't process commands."));
|
||||
return command.promise;
|
||||
};
|
||||
self.once("close", function () {
|
||||
delete self.sendCommand;
|
||||
});
|
||||
return;
|
||||
}
|
||||
const finalSelect = self.prevCondition
|
||||
? self.prevCondition.select
|
||||
: self.condition.select;
|
||||
if (self.options.readOnly) {
|
||||
debug("set the connection to readonly mode");
|
||||
self.readonly().catch(utils_1.noop);
|
||||
}
|
||||
if (self.prevCondition) {
|
||||
const condition = self.prevCondition;
|
||||
self.prevCondition = null;
|
||||
if (condition.subscriber && self.options.autoResubscribe) {
|
||||
// We re-select the previous db first since
|
||||
// `SELECT` command is not valid in sub mode.
|
||||
if (self.condition.select !== finalSelect) {
|
||||
debug("connect to db [%d]", finalSelect);
|
||||
self.select(finalSelect);
|
||||
}
|
||||
const subscribeChannels = condition.subscriber.channels("subscribe");
|
||||
if (subscribeChannels.length) {
|
||||
debug("subscribe %d channels", subscribeChannels.length);
|
||||
self.subscribe(subscribeChannels);
|
||||
}
|
||||
const psubscribeChannels = condition.subscriber.channels("psubscribe");
|
||||
if (psubscribeChannels.length) {
|
||||
debug("psubscribe %d channels", psubscribeChannels.length);
|
||||
self.psubscribe(psubscribeChannels);
|
||||
}
|
||||
const ssubscribeChannels = condition.subscriber.channels("ssubscribe");
|
||||
if (ssubscribeChannels.length) {
|
||||
debug("ssubscribe %s", ssubscribeChannels.length);
|
||||
for (const channel of ssubscribeChannels) {
|
||||
self.ssubscribe(channel);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (self.prevCommandQueue) {
|
||||
if (self.options.autoResendUnfulfilledCommands) {
|
||||
debug("resend %d unfulfilled commands", self.prevCommandQueue.length);
|
||||
while (self.prevCommandQueue.length > 0) {
|
||||
const item = self.prevCommandQueue.shift();
|
||||
if (item.select !== self.condition.select &&
|
||||
item.command.name !== "select") {
|
||||
self.select(item.select);
|
||||
}
|
||||
self.sendCommand(item.command, item.stream);
|
||||
}
|
||||
}
|
||||
else {
|
||||
self.prevCommandQueue = null;
|
||||
}
|
||||
}
|
||||
if (self.offlineQueue.length) {
|
||||
debug("send %d commands in offline queue", self.offlineQueue.length);
|
||||
const offlineQueue = self.offlineQueue;
|
||||
self.resetOfflineQueue();
|
||||
while (offlineQueue.length > 0) {
|
||||
const item = offlineQueue.shift();
|
||||
if (item.select !== self.condition.select &&
|
||||
item.command.name !== "select") {
|
||||
self.select(item.select);
|
||||
}
|
||||
self.sendCommand(item.command, item.stream);
|
||||
}
|
||||
}
|
||||
if (self.condition.select !== finalSelect) {
|
||||
debug("connect to db [%d]", finalSelect);
|
||||
self.select(finalSelect);
|
||||
}
|
||||
};
|
||||
}
|
||||
exports.readyHandler = readyHandler;
|
||||
13
node_modules/ioredis/built/transaction.d.ts
generated
vendored
Normal file
13
node_modules/ioredis/built/transaction.d.ts
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
import { ChainableCommander } from "./utils/RedisCommander";
|
||||
export interface Transaction {
|
||||
pipeline(commands?: unknown[][]): ChainableCommander;
|
||||
multi(options: {
|
||||
pipeline: false;
|
||||
}): Promise<"OK">;
|
||||
multi(): ChainableCommander;
|
||||
multi(options: {
|
||||
pipeline: true;
|
||||
}): ChainableCommander;
|
||||
multi(commands?: unknown[][]): ChainableCommander;
|
||||
}
|
||||
export declare function addTransactionSupport(redis: any): void;
|
||||
93
node_modules/ioredis/built/transaction.js
generated
vendored
Normal file
93
node_modules/ioredis/built/transaction.js
generated
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.addTransactionSupport = void 0;
|
||||
const utils_1 = require("./utils");
|
||||
const standard_as_callback_1 = require("standard-as-callback");
|
||||
const Pipeline_1 = require("./Pipeline");
|
||||
function addTransactionSupport(redis) {
|
||||
redis.pipeline = function (commands) {
|
||||
const pipeline = new Pipeline_1.default(this);
|
||||
if (Array.isArray(commands)) {
|
||||
pipeline.addBatch(commands);
|
||||
}
|
||||
return pipeline;
|
||||
};
|
||||
const { multi } = redis;
|
||||
redis.multi = function (commands, options) {
|
||||
if (typeof options === "undefined" && !Array.isArray(commands)) {
|
||||
options = commands;
|
||||
commands = null;
|
||||
}
|
||||
if (options && options.pipeline === false) {
|
||||
return multi.call(this);
|
||||
}
|
||||
const pipeline = new Pipeline_1.default(this);
|
||||
// @ts-expect-error
|
||||
pipeline.multi();
|
||||
if (Array.isArray(commands)) {
|
||||
pipeline.addBatch(commands);
|
||||
}
|
||||
const exec = pipeline.exec;
|
||||
pipeline.exec = function (callback) {
|
||||
// Wait for the cluster to be connected, since we need nodes information before continuing
|
||||
if (this.isCluster && !this.redis.slots.length) {
|
||||
if (this.redis.status === "wait")
|
||||
this.redis.connect().catch(utils_1.noop);
|
||||
return (0, standard_as_callback_1.default)(new Promise((resolve, reject) => {
|
||||
this.redis.delayUntilReady((err) => {
|
||||
if (err) {
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
this.exec(pipeline).then(resolve, reject);
|
||||
});
|
||||
}), callback);
|
||||
}
|
||||
if (this._transactions > 0) {
|
||||
exec.call(pipeline);
|
||||
}
|
||||
// Returns directly when the pipeline
|
||||
// has been called multiple times (retries).
|
||||
if (this.nodeifiedPromise) {
|
||||
return exec.call(pipeline);
|
||||
}
|
||||
const promise = exec.call(pipeline);
|
||||
return (0, standard_as_callback_1.default)(promise.then(function (result) {
|
||||
const execResult = result[result.length - 1];
|
||||
if (typeof execResult === "undefined") {
|
||||
throw new Error("Pipeline cannot be used to send any commands when the `exec()` has been called on it.");
|
||||
}
|
||||
if (execResult[0]) {
|
||||
execResult[0].previousErrors = [];
|
||||
for (let i = 0; i < result.length - 1; ++i) {
|
||||
if (result[i][0]) {
|
||||
execResult[0].previousErrors.push(result[i][0]);
|
||||
}
|
||||
}
|
||||
throw execResult[0];
|
||||
}
|
||||
return (0, utils_1.wrapMultiResult)(execResult[1]);
|
||||
}), callback);
|
||||
};
|
||||
// @ts-expect-error
|
||||
const { execBuffer } = pipeline;
|
||||
// @ts-expect-error
|
||||
pipeline.execBuffer = function (callback) {
|
||||
if (this._transactions > 0) {
|
||||
execBuffer.call(pipeline);
|
||||
}
|
||||
return pipeline.exec(callback);
|
||||
};
|
||||
return pipeline;
|
||||
};
|
||||
const { exec } = redis;
|
||||
redis.exec = function (callback) {
|
||||
return (0, standard_as_callback_1.default)(exec.call(this).then(function (results) {
|
||||
if (Array.isArray(results)) {
|
||||
results = (0, utils_1.wrapMultiResult)(results);
|
||||
}
|
||||
return results;
|
||||
}), callback);
|
||||
};
|
||||
}
|
||||
exports.addTransactionSupport = addTransactionSupport;
|
||||
33
node_modules/ioredis/built/types.d.ts
generated
vendored
Normal file
33
node_modules/ioredis/built/types.d.ts
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
/// <reference types="node" />
|
||||
import { Socket } from "net";
|
||||
import { TLSSocket } from "tls";
|
||||
export declare type Callback<T = any> = (err?: Error | null, result?: T) => void;
|
||||
export declare type NetStream = Socket | TLSSocket;
|
||||
export declare type CommandParameter = string | Buffer | number | any[];
|
||||
export interface Respondable {
|
||||
name: string;
|
||||
args: CommandParameter[];
|
||||
resolve(result: any): void;
|
||||
reject(error: Error): void;
|
||||
}
|
||||
export interface PipelineWriteableStream {
|
||||
isPipeline: true;
|
||||
write(data: string | Buffer): unknown;
|
||||
destination: {
|
||||
redis: {
|
||||
stream: NetStream;
|
||||
};
|
||||
};
|
||||
}
|
||||
export declare type WriteableStream = NetStream | PipelineWriteableStream;
|
||||
export interface CommandItem {
|
||||
command: Respondable;
|
||||
stream: WriteableStream;
|
||||
select: number;
|
||||
}
|
||||
export interface ScanStreamOptions {
|
||||
match?: string;
|
||||
type?: string;
|
||||
count?: number;
|
||||
noValues?: boolean;
|
||||
}
|
||||
2
node_modules/ioredis/built/types.js
generated
vendored
Normal file
2
node_modules/ioredis/built/types.js
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
50
node_modules/ioredis/built/utils/Commander.d.ts
generated
vendored
Normal file
50
node_modules/ioredis/built/utils/Commander.d.ts
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
import Command from "../Command";
|
||||
import { WriteableStream } from "../types";
|
||||
import RedisCommander, { ClientContext } from "./RedisCommander";
|
||||
export interface CommanderOptions {
|
||||
keyPrefix?: string | undefined;
|
||||
showFriendlyErrorStack?: boolean | undefined;
|
||||
}
|
||||
declare class Commander<Context extends ClientContext = {
|
||||
type: "default";
|
||||
}> {
|
||||
options: CommanderOptions;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
scriptsSet: {};
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
addedBuiltinSet: Set<string>;
|
||||
/**
|
||||
* Return supported builtin commands
|
||||
*/
|
||||
getBuiltinCommands(): string[];
|
||||
/**
|
||||
* Create a builtin command
|
||||
*/
|
||||
createBuiltinCommand(commandName: string): {
|
||||
string: any;
|
||||
buffer: any;
|
||||
};
|
||||
/**
|
||||
* Create add builtin command
|
||||
*/
|
||||
addBuiltinCommand(commandName: string): void;
|
||||
/**
|
||||
* Define a custom command using lua script
|
||||
*/
|
||||
defineCommand(name: string, definition: {
|
||||
lua: string;
|
||||
numberOfKeys?: number;
|
||||
readOnly?: boolean;
|
||||
}): void;
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
sendCommand(command: Command, stream?: WriteableStream, node?: unknown): unknown;
|
||||
}
|
||||
interface Commander<Context> extends RedisCommander<Context> {
|
||||
}
|
||||
export default Commander;
|
||||
117
node_modules/ioredis/built/utils/Commander.js
generated
vendored
Normal file
117
node_modules/ioredis/built/utils/Commander.js
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const commands_1 = require("@ioredis/commands");
|
||||
const autoPipelining_1 = require("../autoPipelining");
|
||||
const Command_1 = require("../Command");
|
||||
const Script_1 = require("../Script");
|
||||
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
||||
class Commander {
|
||||
constructor() {
|
||||
this.options = {};
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
this.scriptsSet = {};
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
this.addedBuiltinSet = new Set();
|
||||
}
|
||||
/**
|
||||
* Return supported builtin commands
|
||||
*/
|
||||
getBuiltinCommands() {
|
||||
return commands.slice(0);
|
||||
}
|
||||
/**
|
||||
* Create a builtin command
|
||||
*/
|
||||
createBuiltinCommand(commandName) {
|
||||
return {
|
||||
string: generateFunction(null, commandName, "utf8"),
|
||||
buffer: generateFunction(null, commandName, null),
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Create add builtin command
|
||||
*/
|
||||
addBuiltinCommand(commandName) {
|
||||
this.addedBuiltinSet.add(commandName);
|
||||
this[commandName] = generateFunction(commandName, commandName, "utf8");
|
||||
this[commandName + "Buffer"] = generateFunction(commandName + "Buffer", commandName, null);
|
||||
}
|
||||
/**
|
||||
* Define a custom command using lua script
|
||||
*/
|
||||
defineCommand(name, definition) {
|
||||
const script = new Script_1.default(definition.lua, definition.numberOfKeys, this.options.keyPrefix, definition.readOnly);
|
||||
this.scriptsSet[name] = script;
|
||||
this[name] = generateScriptingFunction(name, name, script, "utf8");
|
||||
this[name + "Buffer"] = generateScriptingFunction(name + "Buffer", name, script, null);
|
||||
}
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
sendCommand(command, stream, node) {
|
||||
throw new Error('"sendCommand" is not implemented');
|
||||
}
|
||||
}
|
||||
const commands = commands_1.list.filter((command) => command !== "monitor");
|
||||
commands.push("sentinel");
|
||||
commands.forEach(function (commandName) {
|
||||
Commander.prototype[commandName] = generateFunction(commandName, commandName, "utf8");
|
||||
Commander.prototype[commandName + "Buffer"] = generateFunction(commandName + "Buffer", commandName, null);
|
||||
});
|
||||
Commander.prototype.call = generateFunction("call", "utf8");
|
||||
Commander.prototype.callBuffer = generateFunction("callBuffer", null);
|
||||
// @ts-expect-error
|
||||
Commander.prototype.send_command = Commander.prototype.call;
|
||||
function generateFunction(functionName, _commandName, _encoding) {
|
||||
if (typeof _encoding === "undefined") {
|
||||
_encoding = _commandName;
|
||||
_commandName = null;
|
||||
}
|
||||
return function (...args) {
|
||||
const commandName = (_commandName || args.shift());
|
||||
let callback = args[args.length - 1];
|
||||
if (typeof callback === "function") {
|
||||
args.pop();
|
||||
}
|
||||
else {
|
||||
callback = undefined;
|
||||
}
|
||||
const options = {
|
||||
errorStack: this.options.showFriendlyErrorStack ? new Error() : undefined,
|
||||
keyPrefix: this.options.keyPrefix,
|
||||
replyEncoding: _encoding,
|
||||
};
|
||||
// No auto pipeline, use regular command sending
|
||||
if (!(0, autoPipelining_1.shouldUseAutoPipelining)(this, functionName, commandName)) {
|
||||
return this.sendCommand(
|
||||
// @ts-expect-error
|
||||
new Command_1.default(commandName, args, options, callback));
|
||||
}
|
||||
// Create a new pipeline and make sure it's scheduled
|
||||
return (0, autoPipelining_1.executeWithAutoPipelining)(this, functionName, commandName,
|
||||
// @ts-expect-error
|
||||
args, callback);
|
||||
};
|
||||
}
|
||||
function generateScriptingFunction(functionName, commandName, script, encoding) {
|
||||
return function (...args) {
|
||||
const callback = typeof args[args.length - 1] === "function" ? args.pop() : undefined;
|
||||
const options = {
|
||||
replyEncoding: encoding,
|
||||
};
|
||||
if (this.options.showFriendlyErrorStack) {
|
||||
options.errorStack = new Error();
|
||||
}
|
||||
// No auto pipeline, use regular command sending
|
||||
if (!(0, autoPipelining_1.shouldUseAutoPipelining)(this, functionName, commandName)) {
|
||||
return script.execute(this, args, options, callback);
|
||||
}
|
||||
// Create a new pipeline and make sure it's scheduled
|
||||
return (0, autoPipelining_1.executeWithAutoPipelining)(this, functionName, commandName, args, callback);
|
||||
};
|
||||
}
|
||||
exports.default = Commander;
|
||||
8950
node_modules/ioredis/built/utils/RedisCommander.d.ts
generated
vendored
Normal file
8950
node_modules/ioredis/built/utils/RedisCommander.d.ts
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
7
node_modules/ioredis/built/utils/RedisCommander.js
generated
vendored
Normal file
7
node_modules/ioredis/built/utils/RedisCommander.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
"use strict";
|
||||
/**
|
||||
* This file is generated by @ioredis/interface-generator.
|
||||
* Don't edit it manually. Instead, run `npm run generate` to update
|
||||
* this file.
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
3
node_modules/ioredis/built/utils/applyMixin.d.ts
generated
vendored
Normal file
3
node_modules/ioredis/built/utils/applyMixin.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
declare type Constructor = new (...args: any[]) => void;
|
||||
declare function applyMixin(derivedConstructor: Constructor, mixinConstructor: Constructor): void;
|
||||
export default applyMixin;
|
||||
8
node_modules/ioredis/built/utils/applyMixin.js
generated
vendored
Normal file
8
node_modules/ioredis/built/utils/applyMixin.js
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
function applyMixin(derivedConstructor, mixinConstructor) {
|
||||
Object.getOwnPropertyNames(mixinConstructor.prototype).forEach((name) => {
|
||||
Object.defineProperty(derivedConstructor.prototype, name, Object.getOwnPropertyDescriptor(mixinConstructor.prototype, name));
|
||||
});
|
||||
}
|
||||
exports.default = applyMixin;
|
||||
14
node_modules/ioredis/built/utils/argumentParsers.d.ts
generated
vendored
Normal file
14
node_modules/ioredis/built/utils/argumentParsers.d.ts
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
import { CommandParameter } from "../types";
|
||||
/**
|
||||
* Parses a command parameter as seconds and converts to milliseconds.
|
||||
* @param arg - The command parameter representing seconds
|
||||
* @returns The value in milliseconds, 0 if value is <= 0, or undefined if parsing fails
|
||||
*/
|
||||
export declare const parseSecondsArgument: (arg: CommandParameter | undefined) => number | undefined;
|
||||
/**
|
||||
* Parses the BLOCK option from Redis command arguments (e.g., XREAD, XREADGROUP).
|
||||
* @param args - Array of command parameters to search for the BLOCK option
|
||||
* @returns The block duration in milliseconds, 0 if duration is <= 0,
|
||||
* null if BLOCK option is not found, or undefined if BLOCK is found but duration is invalid
|
||||
*/
|
||||
export declare const parseBlockOption: (args: CommandParameter[]) => number | null | undefined;
|
||||
74
node_modules/ioredis/built/utils/argumentParsers.js
generated
vendored
Normal file
74
node_modules/ioredis/built/utils/argumentParsers.js
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.parseBlockOption = exports.parseSecondsArgument = void 0;
|
||||
/**
|
||||
* Parses a command parameter to a number.
|
||||
* @param arg - The command parameter to parse (number, string, or Buffer)
|
||||
* @returns The parsed number, or undefined if parsing fails or arg is undefined
|
||||
*/
|
||||
const parseNumberArgument = (arg) => {
|
||||
if (typeof arg === "number") {
|
||||
return arg;
|
||||
}
|
||||
if (Buffer.isBuffer(arg)) {
|
||||
return parseNumberArgument(arg.toString());
|
||||
}
|
||||
if (typeof arg === "string") {
|
||||
const value = Number(arg);
|
||||
return Number.isFinite(value) ? value : undefined;
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
/**
|
||||
* Parses a command parameter to a string.
|
||||
* @param arg - The command parameter to parse (string or Buffer)
|
||||
* @returns The parsed string, or undefined if arg is not a string/Buffer or is undefined
|
||||
*/
|
||||
const parseStringArgument = (arg) => {
|
||||
if (typeof arg === "string") {
|
||||
return arg;
|
||||
}
|
||||
if (Buffer.isBuffer(arg)) {
|
||||
return arg.toString();
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
/**
|
||||
* Parses a command parameter as seconds and converts to milliseconds.
|
||||
* @param arg - The command parameter representing seconds
|
||||
* @returns The value in milliseconds, 0 if value is <= 0, or undefined if parsing fails
|
||||
*/
|
||||
const parseSecondsArgument = (arg) => {
|
||||
const value = parseNumberArgument(arg);
|
||||
if (value === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
if (value <= 0) {
|
||||
return 0;
|
||||
}
|
||||
return value * 1000;
|
||||
};
|
||||
exports.parseSecondsArgument = parseSecondsArgument;
|
||||
/**
|
||||
* Parses the BLOCK option from Redis command arguments (e.g., XREAD, XREADGROUP).
|
||||
* @param args - Array of command parameters to search for the BLOCK option
|
||||
* @returns The block duration in milliseconds, 0 if duration is <= 0,
|
||||
* null if BLOCK option is not found, or undefined if BLOCK is found but duration is invalid
|
||||
*/
|
||||
const parseBlockOption = (args) => {
|
||||
for (let i = 0; i < args.length; i++) {
|
||||
const token = parseStringArgument(args[i]);
|
||||
if (token && token.toLowerCase() === "block") {
|
||||
const duration = parseNumberArgument(args[i + 1]);
|
||||
if (duration === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
if (duration <= 0) {
|
||||
return 0;
|
||||
}
|
||||
return duration;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
};
|
||||
exports.parseBlockOption = parseBlockOption;
|
||||
16
node_modules/ioredis/built/utils/debug.d.ts
generated
vendored
Normal file
16
node_modules/ioredis/built/utils/debug.d.ts
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
declare const MAX_ARGUMENT_LENGTH = 200;
|
||||
/**
|
||||
* helper function that tried to get a string value for
|
||||
* arbitrary "debug" arg
|
||||
*/
|
||||
declare function getStringValue(v: any): string | void;
|
||||
/**
|
||||
* helper function that redacts a string representation of a "debug" arg
|
||||
*/
|
||||
declare function genRedactedString(str: string, maxLen: number): string;
|
||||
/**
|
||||
* a wrapper for the `debug` module, used to generate
|
||||
* "debug functions" that trim the values in their output
|
||||
*/
|
||||
export default function genDebugFunction(namespace: string): (...args: any[]) => void;
|
||||
export { MAX_ARGUMENT_LENGTH, getStringValue, genRedactedString };
|
||||
95
node_modules/ioredis/built/utils/debug.js
generated
vendored
Normal file
95
node_modules/ioredis/built/utils/debug.js
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.genRedactedString = exports.getStringValue = exports.MAX_ARGUMENT_LENGTH = void 0;
|
||||
const debug_1 = require("debug");
|
||||
const MAX_ARGUMENT_LENGTH = 200;
|
||||
exports.MAX_ARGUMENT_LENGTH = MAX_ARGUMENT_LENGTH;
|
||||
const NAMESPACE_PREFIX = "ioredis";
|
||||
/**
|
||||
* helper function that tried to get a string value for
|
||||
* arbitrary "debug" arg
|
||||
*/
|
||||
function getStringValue(v) {
|
||||
if (v === null) {
|
||||
return;
|
||||
}
|
||||
switch (typeof v) {
|
||||
case "boolean":
|
||||
return;
|
||||
case "number":
|
||||
return;
|
||||
case "object":
|
||||
if (Buffer.isBuffer(v)) {
|
||||
return v.toString("hex");
|
||||
}
|
||||
if (Array.isArray(v)) {
|
||||
return v.join(",");
|
||||
}
|
||||
try {
|
||||
return JSON.stringify(v);
|
||||
}
|
||||
catch (e) {
|
||||
return;
|
||||
}
|
||||
case "string":
|
||||
return v;
|
||||
}
|
||||
}
|
||||
exports.getStringValue = getStringValue;
|
||||
/**
|
||||
* helper function that redacts a string representation of a "debug" arg
|
||||
*/
|
||||
function genRedactedString(str, maxLen) {
|
||||
const { length } = str;
|
||||
return length <= maxLen
|
||||
? str
|
||||
: str.slice(0, maxLen) + ' ... <REDACTED full-length="' + length + '">';
|
||||
}
|
||||
exports.genRedactedString = genRedactedString;
|
||||
/**
|
||||
* a wrapper for the `debug` module, used to generate
|
||||
* "debug functions" that trim the values in their output
|
||||
*/
|
||||
function genDebugFunction(namespace) {
|
||||
const fn = (0, debug_1.default)(`${NAMESPACE_PREFIX}:${namespace}`);
|
||||
function wrappedDebug(...args) {
|
||||
if (!fn.enabled) {
|
||||
return; // no-op
|
||||
}
|
||||
// we skip the first arg because that is the message
|
||||
for (let i = 1; i < args.length; i++) {
|
||||
const str = getStringValue(args[i]);
|
||||
if (typeof str === "string" && str.length > MAX_ARGUMENT_LENGTH) {
|
||||
args[i] = genRedactedString(str, MAX_ARGUMENT_LENGTH);
|
||||
}
|
||||
}
|
||||
return fn.apply(null, args);
|
||||
}
|
||||
Object.defineProperties(wrappedDebug, {
|
||||
namespace: {
|
||||
get() {
|
||||
return fn.namespace;
|
||||
},
|
||||
},
|
||||
enabled: {
|
||||
get() {
|
||||
return fn.enabled;
|
||||
},
|
||||
},
|
||||
destroy: {
|
||||
get() {
|
||||
return fn.destroy;
|
||||
},
|
||||
},
|
||||
log: {
|
||||
get() {
|
||||
return fn.log;
|
||||
},
|
||||
set(l) {
|
||||
fn.log = l;
|
||||
},
|
||||
},
|
||||
});
|
||||
return wrappedDebug;
|
||||
}
|
||||
exports.default = genDebugFunction;
|
||||
124
node_modules/ioredis/built/utils/index.d.ts
generated
vendored
Normal file
124
node_modules/ioredis/built/utils/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
/// <reference types="node" />
|
||||
import { defaults, noop } from "./lodash";
|
||||
import { Callback } from "../types";
|
||||
import Debug from "./debug";
|
||||
/**
|
||||
* Convert a buffer to string, supports buffer array
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* const input = [Buffer.from('foo'), [Buffer.from('bar')]]
|
||||
* const res = convertBufferToString(input, 'utf8')
|
||||
* expect(res).to.eql(['foo', ['bar']])
|
||||
* ```
|
||||
*/
|
||||
export declare function convertBufferToString(value: any, encoding?: BufferEncoding): any;
|
||||
/**
|
||||
* Convert a list of results to node-style
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* const input = ['a', 'b', new Error('c'), 'd']
|
||||
* const output = exports.wrapMultiResult(input)
|
||||
* expect(output).to.eql([[null, 'a'], [null, 'b'], [new Error('c')], [null, 'd'])
|
||||
* ```
|
||||
*/
|
||||
export declare function wrapMultiResult(arr: unknown[] | null): unknown[][] | null;
|
||||
/**
|
||||
* Detect if the argument is a int
|
||||
* @example
|
||||
* ```js
|
||||
* > isInt('123')
|
||||
* true
|
||||
* > isInt('123.3')
|
||||
* false
|
||||
* > isInt('1x')
|
||||
* false
|
||||
* > isInt(123)
|
||||
* true
|
||||
* > isInt(true)
|
||||
* false
|
||||
* ```
|
||||
*/
|
||||
export declare function isInt(value: any): value is string;
|
||||
/**
|
||||
* Pack an array to an Object
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* > packObject(['a', 'b', 'c', 'd'])
|
||||
* { a: 'b', c: 'd' }
|
||||
* ```
|
||||
*/
|
||||
export declare function packObject(array: any[]): Record<string, any>;
|
||||
/**
|
||||
* Return a callback with timeout
|
||||
*/
|
||||
export declare function timeout<T>(callback: Callback<T>, timeout: number): Callback<T>;
|
||||
/**
|
||||
* Convert an object to an array
|
||||
* @example
|
||||
* ```js
|
||||
* > convertObjectToArray({ a: '1' })
|
||||
* ['a', '1']
|
||||
* ```
|
||||
*/
|
||||
export declare function convertObjectToArray<T>(obj: Record<string, T>): (string | T)[];
|
||||
/**
|
||||
* Convert a map to an array
|
||||
* @example
|
||||
* ```js
|
||||
* > convertMapToArray(new Map([[1, '2']]))
|
||||
* [1, '2']
|
||||
* ```
|
||||
*/
|
||||
export declare function convertMapToArray<K, V>(map: Map<K, V>): (K | V)[];
|
||||
/**
|
||||
* Convert a non-string arg to a string
|
||||
*/
|
||||
export declare function toArg(arg: any): string;
|
||||
/**
|
||||
* Optimize error stack
|
||||
*
|
||||
* @param error actually error
|
||||
* @param friendlyStack the stack that more meaningful
|
||||
* @param filterPath only show stacks with the specified path
|
||||
*/
|
||||
export declare function optimizeErrorStack(error: Error, friendlyStack: string, filterPath: string): Error;
|
||||
/**
|
||||
* Parse the redis protocol url
|
||||
*/
|
||||
export declare function parseURL(url: string): Record<string, unknown>;
|
||||
interface TLSOptions {
|
||||
port: number;
|
||||
host: string;
|
||||
[key: string]: any;
|
||||
}
|
||||
/**
|
||||
* Resolve TLS profile shortcut in connection options
|
||||
*/
|
||||
export declare function resolveTLSProfile(options: TLSOptions): TLSOptions;
|
||||
/**
|
||||
* Get a random element from `array`
|
||||
*/
|
||||
export declare function sample<T>(array: T[], from?: number): T;
|
||||
/**
|
||||
* Shuffle the array using the Fisher-Yates Shuffle.
|
||||
* This method will mutate the original array.
|
||||
*/
|
||||
export declare function shuffle<T>(array: T[]): T[];
|
||||
/**
|
||||
* Error message for connection being disconnected
|
||||
*/
|
||||
export declare const CONNECTION_CLOSED_ERROR_MSG = "Connection is closed.";
|
||||
export declare function zipMap<K, V>(keys: K[], values: V[]): Map<K, V>;
|
||||
/**
|
||||
* Retrieves cached package metadata from package.json.
|
||||
*
|
||||
* @internal
|
||||
* @returns {Promise<{version: string} | null>} Package metadata or null if unavailable
|
||||
*/
|
||||
export declare function getPackageMeta(): Promise<{
|
||||
version: string;
|
||||
}>;
|
||||
export { Debug, defaults, noop };
|
||||
332
node_modules/ioredis/built/utils/index.js
generated
vendored
Normal file
332
node_modules/ioredis/built/utils/index.js
generated
vendored
Normal file
@@ -0,0 +1,332 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.noop = exports.defaults = exports.Debug = exports.getPackageMeta = exports.zipMap = exports.CONNECTION_CLOSED_ERROR_MSG = exports.shuffle = exports.sample = exports.resolveTLSProfile = exports.parseURL = exports.optimizeErrorStack = exports.toArg = exports.convertMapToArray = exports.convertObjectToArray = exports.timeout = exports.packObject = exports.isInt = exports.wrapMultiResult = exports.convertBufferToString = void 0;
|
||||
const fs_1 = require("fs");
|
||||
const path_1 = require("path");
|
||||
const url_1 = require("url");
|
||||
const lodash_1 = require("./lodash");
|
||||
Object.defineProperty(exports, "defaults", { enumerable: true, get: function () { return lodash_1.defaults; } });
|
||||
Object.defineProperty(exports, "noop", { enumerable: true, get: function () { return lodash_1.noop; } });
|
||||
const debug_1 = require("./debug");
|
||||
exports.Debug = debug_1.default;
|
||||
const TLSProfiles_1 = require("../constants/TLSProfiles");
|
||||
/**
|
||||
* Convert a buffer to string, supports buffer array
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* const input = [Buffer.from('foo'), [Buffer.from('bar')]]
|
||||
* const res = convertBufferToString(input, 'utf8')
|
||||
* expect(res).to.eql(['foo', ['bar']])
|
||||
* ```
|
||||
*/
|
||||
function convertBufferToString(value, encoding) {
|
||||
if (value instanceof Buffer) {
|
||||
return value.toString(encoding);
|
||||
}
|
||||
if (Array.isArray(value)) {
|
||||
const length = value.length;
|
||||
const res = Array(length);
|
||||
for (let i = 0; i < length; ++i) {
|
||||
res[i] =
|
||||
value[i] instanceof Buffer && encoding === "utf8"
|
||||
? value[i].toString()
|
||||
: convertBufferToString(value[i], encoding);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
return value;
|
||||
}
|
||||
exports.convertBufferToString = convertBufferToString;
|
||||
/**
|
||||
* Convert a list of results to node-style
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* const input = ['a', 'b', new Error('c'), 'd']
|
||||
* const output = exports.wrapMultiResult(input)
|
||||
* expect(output).to.eql([[null, 'a'], [null, 'b'], [new Error('c')], [null, 'd'])
|
||||
* ```
|
||||
*/
|
||||
function wrapMultiResult(arr) {
|
||||
// When using WATCH/EXEC transactions, the EXEC will return
|
||||
// a null instead of an array
|
||||
if (!arr) {
|
||||
return null;
|
||||
}
|
||||
const result = [];
|
||||
const length = arr.length;
|
||||
for (let i = 0; i < length; ++i) {
|
||||
const item = arr[i];
|
||||
if (item instanceof Error) {
|
||||
result.push([item]);
|
||||
}
|
||||
else {
|
||||
result.push([null, item]);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
exports.wrapMultiResult = wrapMultiResult;
|
||||
/**
|
||||
* Detect if the argument is a int
|
||||
* @example
|
||||
* ```js
|
||||
* > isInt('123')
|
||||
* true
|
||||
* > isInt('123.3')
|
||||
* false
|
||||
* > isInt('1x')
|
||||
* false
|
||||
* > isInt(123)
|
||||
* true
|
||||
* > isInt(true)
|
||||
* false
|
||||
* ```
|
||||
*/
|
||||
function isInt(value) {
|
||||
const x = parseFloat(value);
|
||||
return !isNaN(value) && (x | 0) === x;
|
||||
}
|
||||
exports.isInt = isInt;
|
||||
/**
|
||||
* Pack an array to an Object
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* > packObject(['a', 'b', 'c', 'd'])
|
||||
* { a: 'b', c: 'd' }
|
||||
* ```
|
||||
*/
|
||||
function packObject(array) {
|
||||
const result = {};
|
||||
const length = array.length;
|
||||
for (let i = 1; i < length; i += 2) {
|
||||
result[array[i - 1]] = array[i];
|
||||
}
|
||||
return result;
|
||||
}
|
||||
exports.packObject = packObject;
|
||||
/**
|
||||
* Return a callback with timeout
|
||||
*/
|
||||
function timeout(callback, timeout) {
|
||||
let timer = null;
|
||||
const run = function () {
|
||||
if (timer) {
|
||||
clearTimeout(timer);
|
||||
timer = null;
|
||||
callback.apply(this, arguments);
|
||||
}
|
||||
};
|
||||
timer = setTimeout(run, timeout, new Error("timeout"));
|
||||
return run;
|
||||
}
|
||||
exports.timeout = timeout;
|
||||
/**
|
||||
* Convert an object to an array
|
||||
* @example
|
||||
* ```js
|
||||
* > convertObjectToArray({ a: '1' })
|
||||
* ['a', '1']
|
||||
* ```
|
||||
*/
|
||||
function convertObjectToArray(obj) {
|
||||
const result = [];
|
||||
const keys = Object.keys(obj); // Object.entries requires node 7+
|
||||
for (let i = 0, l = keys.length; i < l; i++) {
|
||||
result.push(keys[i], obj[keys[i]]);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
exports.convertObjectToArray = convertObjectToArray;
|
||||
/**
|
||||
* Convert a map to an array
|
||||
* @example
|
||||
* ```js
|
||||
* > convertMapToArray(new Map([[1, '2']]))
|
||||
* [1, '2']
|
||||
* ```
|
||||
*/
|
||||
function convertMapToArray(map) {
|
||||
const result = [];
|
||||
let pos = 0;
|
||||
map.forEach(function (value, key) {
|
||||
result[pos] = key;
|
||||
result[pos + 1] = value;
|
||||
pos += 2;
|
||||
});
|
||||
return result;
|
||||
}
|
||||
exports.convertMapToArray = convertMapToArray;
|
||||
/**
|
||||
* Convert a non-string arg to a string
|
||||
*/
|
||||
function toArg(arg) {
|
||||
if (arg === null || typeof arg === "undefined") {
|
||||
return "";
|
||||
}
|
||||
return String(arg);
|
||||
}
|
||||
exports.toArg = toArg;
|
||||
/**
|
||||
* Optimize error stack
|
||||
*
|
||||
* @param error actually error
|
||||
* @param friendlyStack the stack that more meaningful
|
||||
* @param filterPath only show stacks with the specified path
|
||||
*/
|
||||
function optimizeErrorStack(error, friendlyStack, filterPath) {
|
||||
const stacks = friendlyStack.split("\n");
|
||||
let lines = "";
|
||||
let i;
|
||||
for (i = 1; i < stacks.length; ++i) {
|
||||
if (stacks[i].indexOf(filterPath) === -1) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
for (let j = i; j < stacks.length; ++j) {
|
||||
lines += "\n" + stacks[j];
|
||||
}
|
||||
if (error.stack) {
|
||||
const pos = error.stack.indexOf("\n");
|
||||
error.stack = error.stack.slice(0, pos) + lines;
|
||||
}
|
||||
return error;
|
||||
}
|
||||
exports.optimizeErrorStack = optimizeErrorStack;
|
||||
/**
 * Parse the redis protocol url
 *
 * Accepts either a bare integer (port shorthand) or a URL string
 * (redis://, rediss://, "host:port", or a path) and normalizes it
 * into a connection-options object.
 */
function parseURL(url) {
    // A plain integer is shorthand for "port only".
    if (isInt(url)) {
        return { port: url };
    }
    let parsed = (0, url_1.parse)(url, true, true);
    if (!parsed.slashes && url[0] !== "/") {
        // "host:port" without a protocol — prepend "//" so the host
        // part is parsed as a host rather than a path.
        url = "//" + url;
        parsed = (0, url_1.parse)(url, true, true);
    }
    const options = parsed.query || {};
    const result = {};
    if (parsed.auth) {
        // "user:pass" — without a colon the whole value is the username.
        const sep = parsed.auth.indexOf(":");
        if (sep === -1) {
            result.username = parsed.auth;
            result.password = "";
        }
        else {
            result.username = parsed.auth.slice(0, sep);
            result.password = parsed.auth.slice(sep + 1);
        }
    }
    if (parsed.pathname) {
        if (parsed.protocol === "redis:" || parsed.protocol === "rediss:") {
            // For redis URLs, a non-empty path selects the database index.
            if (parsed.pathname.length > 1) {
                result.db = parsed.pathname.slice(1);
            }
        }
        else {
            // Other protocols keep the raw pathname.
            result.path = parsed.pathname;
        }
    }
    if (parsed.host) {
        result.host = parsed.hostname;
    }
    if (parsed.port) {
        result.port = parsed.port;
    }
    if (typeof options.family === "string") {
        const family = Number.parseInt(options.family, 10);
        if (!Number.isNaN(family)) {
            result.family = family;
        }
    }
    // Remaining query-string options fill in any fields not set above.
    (0, lodash_1.defaults)(result, options);
    return result;
}
exports.parseURL = parseURL;
|
||||
/**
 * Resolve TLS profile shortcut in connection options
 *
 * Expands `tls: "<profile>"` or `tls: { profile: "<profile>" }` into
 * the full option set from TLSProfiles. The caller's objects are not
 * mutated; new objects are returned when an expansion happens.
 */
function resolveTLSProfile(options) {
    let tls = options === null || options === void 0 ? void 0 : options.tls;
    if (typeof tls === "string") {
        tls = { profile: tls };
    }
    const profile = TLSProfiles_1.default[tls === null || tls === void 0 ? void 0 : tls.profile];
    if (profile) {
        // Profile values are defaults; explicit tls fields take precedence.
        tls = { ...profile, ...tls };
        delete tls.profile;
        options = { ...options, tls };
    }
    return options;
}
exports.resolveTLSProfile = resolveTLSProfile;
|
||||
/**
 * Get a random element from `array`, drawn uniformly from the
 * indexes `from` (inclusive) through the end of the array.
 *
 * @returns the sampled element, or `null` when `from` is past the end.
 */
function sample(array, from = 0) {
    if (from >= array.length) {
        return null;
    }
    const span = array.length - from;
    return array[from + Math.floor(Math.random() * span)];
}
exports.sample = sample;
|
||||
/**
 * Shuffle the array using the Fisher-Yates Shuffle.
 * This method will mutate the original array.
 */
function shuffle(array) {
    for (let remaining = array.length; remaining > 0; remaining--) {
        // Pick a random index among the not-yet-placed elements...
        const pick = Math.floor(Math.random() * remaining);
        // ...and swap it into the last unplaced slot.
        const last = remaining - 1;
        const tmp = array[last];
        array[last] = array[pick];
        array[pick] = tmp;
    }
    return array;
}
exports.shuffle = shuffle;
|
||||
/**
 * Error message for connection being disconnected
 */
exports.CONNECTION_CLOSED_ERROR_MSG = "Connection is closed.";
/**
 * Pair up `keys` and `values` positionally into a Map
 * (keys[i] -> values[i]).
 */
function zipMap(keys, values) {
    return new Map(keys.map((key, i) => [key, values[i]]));
}
exports.zipMap = zipMap;
|
||||
/**
 * Memoized package metadata to avoid repeated file system reads.
 *
 * @internal
 */
let cachedPackageMeta = null;
/**
 * Retrieves cached package metadata from package.json.
 *
 * On the first call this reads and parses the package.json that sits
 * two directories above this file; the result is cached for all later
 * calls. If the read or parse fails, the sentinel version
 * "error-fetching-version" is cached instead (the failure is not
 * retried).
 *
 * @internal
 * @returns {Promise<{version: string}>} Package metadata
 */
async function getPackageMeta() {
    if (cachedPackageMeta) {
        return cachedPackageMeta;
    }
    let version;
    try {
        const packagePath = (0, path_1.resolve)(__dirname, "..", "..", "package.json");
        const raw = await fs_1.promises.readFile(packagePath, "utf8");
        version = JSON.parse(raw).version;
    }
    catch (err) {
        // Cache a sentinel so every later call doesn't re-hit the disk.
        version = "error-fetching-version";
    }
    cachedPackageMeta = { version };
    return cachedPackageMeta;
}
exports.getPackageMeta = getPackageMeta;
|
||||
4
node_modules/ioredis/built/utils/lodash.d.ts
generated
vendored
Normal file
4
node_modules/ioredis/built/utils/lodash.d.ts
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
// Type declarations for the minimal lodash subset used by ioredis.
import defaults = require("lodash.defaults");
import isArguments = require("lodash.isarguments");
// A shared do-nothing placeholder callback.
export declare function noop(): void;
export { defaults, isArguments };
|
||||
9
node_modules/ioredis/built/utils/lodash.js
generated
vendored
Normal file
9
node_modules/ioredis/built/utils/lodash.js
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Re-exports the minimal lodash subset used by ioredis, plus a shared noop.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.isArguments = exports.defaults = exports.noop = void 0;
// lodash.defaults: fills in unset own properties of the destination object.
const defaults = require("lodash.defaults");
exports.defaults = defaults;
// lodash.isarguments: detects `arguments` objects.
const isArguments = require("lodash.isarguments");
exports.isArguments = isArguments;
// A shared do-nothing placeholder callback.
function noop() { }
exports.noop = noop;
|
||||
20
node_modules/ioredis/node_modules/debug/LICENSE
generated
vendored
Normal file
20
node_modules/ioredis/node_modules/debug/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
(The MIT License)
|
||||
|
||||
Copyright (c) 2014-2017 TJ Holowaychuk <tj@vision-media.ca>
|
||||
Copyright (c) 2018-2021 Josh Junon
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
|
||||
and associated documentation files (the 'Software'), to deal in the Software without restriction,
|
||||
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial
|
||||
portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
|
||||
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
481
node_modules/ioredis/node_modules/debug/README.md
generated
vendored
Normal file
481
node_modules/ioredis/node_modules/debug/README.md
generated
vendored
Normal file
@@ -0,0 +1,481 @@
|
||||
# debug
|
||||
[](#backers)
|
||||
[](#sponsors)
|
||||
|
||||
<img width="647" src="https://user-images.githubusercontent.com/71256/29091486-fa38524c-7c37-11e7-895f-e7ec8e1039b6.png">
|
||||
|
||||
A tiny JavaScript debugging utility modelled after Node.js core's debugging
|
||||
technique. Works in Node.js and web browsers.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
$ npm install debug
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
`debug` exposes a function; simply pass this function the name of your module, and it will return a decorated version of `console.error` for you to pass debug statements to. This will allow you to toggle the debug output for different parts of your module as well as the module as a whole.
|
||||
|
||||
Example [_app.js_](./examples/node/app.js):
|
||||
|
||||
```js
|
||||
var debug = require('debug')('http')
|
||||
, http = require('http')
|
||||
, name = 'My App';
|
||||
|
||||
// fake app
|
||||
|
||||
debug('booting %o', name);
|
||||
|
||||
http.createServer(function(req, res){
|
||||
debug(req.method + ' ' + req.url);
|
||||
res.end('hello\n');
|
||||
}).listen(3000, function(){
|
||||
debug('listening');
|
||||
});
|
||||
|
||||
// fake worker of some kind
|
||||
|
||||
require('./worker');
|
||||
```
|
||||
|
||||
Example [_worker.js_](./examples/node/worker.js):
|
||||
|
||||
```js
|
||||
var a = require('debug')('worker:a')
|
||||
, b = require('debug')('worker:b');
|
||||
|
||||
function work() {
|
||||
a('doing lots of uninteresting work');
|
||||
setTimeout(work, Math.random() * 1000);
|
||||
}
|
||||
|
||||
work();
|
||||
|
||||
function workb() {
|
||||
b('doing some work');
|
||||
setTimeout(workb, Math.random() * 2000);
|
||||
}
|
||||
|
||||
workb();
|
||||
```
|
||||
|
||||
The `DEBUG` environment variable is then used to enable these based on space or
|
||||
comma-delimited names.
|
||||
|
||||
Here are some examples:
|
||||
|
||||
<img width="647" alt="screen shot 2017-08-08 at 12 53 04 pm" src="https://user-images.githubusercontent.com/71256/29091703-a6302cdc-7c38-11e7-8304-7c0b3bc600cd.png">
|
||||
<img width="647" alt="screen shot 2017-08-08 at 12 53 38 pm" src="https://user-images.githubusercontent.com/71256/29091700-a62a6888-7c38-11e7-800b-db911291ca2b.png">
|
||||
<img width="647" alt="screen shot 2017-08-08 at 12 53 25 pm" src="https://user-images.githubusercontent.com/71256/29091701-a62ea114-7c38-11e7-826a-2692bedca740.png">
|
||||
|
||||
#### Windows command prompt notes
|
||||
|
||||
##### CMD
|
||||
|
||||
On Windows the environment variable is set using the `set` command.
|
||||
|
||||
```cmd
|
||||
set DEBUG=*,-not_this
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```cmd
|
||||
set DEBUG=* & node app.js
|
||||
```
|
||||
|
||||
##### PowerShell (VS Code default)
|
||||
|
||||
PowerShell uses different syntax to set environment variables.
|
||||
|
||||
```cmd
|
||||
$env:DEBUG = "*,-not_this"
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```cmd
|
||||
$env:DEBUG='app';node app.js
|
||||
```
|
||||
|
||||
Then, run the program to be debugged as usual.
|
||||
|
||||
npm script example:
|
||||
```js
|
||||
"windowsDebug": "@powershell -Command $env:DEBUG='*';node app.js",
|
||||
```
|
||||
|
||||
## Namespace Colors
|
||||
|
||||
Every debug instance has a color generated for it based on its namespace name.
|
||||
This helps when visually parsing the debug output to identify which debug instance
|
||||
a debug line belongs to.
|
||||
|
||||
#### Node.js
|
||||
|
||||
In Node.js, colors are enabled when stderr is a TTY. You also _should_ install
|
||||
the [`supports-color`](https://npmjs.org/supports-color) module alongside debug,
|
||||
otherwise debug will only use a small handful of basic colors.
|
||||
|
||||
<img width="521" src="https://user-images.githubusercontent.com/71256/29092181-47f6a9e6-7c3a-11e7-9a14-1928d8a711cd.png">
|
||||
|
||||
#### Web Browser
|
||||
|
||||
Colors are also enabled on "Web Inspectors" that understand the `%c` formatting
|
||||
option. These are WebKit web inspectors, Firefox ([since version
|
||||
31](https://hacks.mozilla.org/2014/05/editable-box-model-multiple-selection-sublime-text-keys-much-more-firefox-developer-tools-episode-31/))
|
||||
and the Firebug plugin for Firefox (any version).
|
||||
|
||||
<img width="524" src="https://user-images.githubusercontent.com/71256/29092033-b65f9f2e-7c39-11e7-8e32-f6f0d8e865c1.png">
|
||||
|
||||
|
||||
## Millisecond diff
|
||||
|
||||
When actively developing an application it can be useful to see how much time was spent between one `debug()` call and the next. Suppose for example you invoke `debug()` before requesting a resource, and after as well, the "+NNNms" will show you how much time was spent between calls.
|
||||
|
||||
<img width="647" src="https://user-images.githubusercontent.com/71256/29091486-fa38524c-7c37-11e7-895f-e7ec8e1039b6.png">
|
||||
|
||||
When stdout is not a TTY, `Date#toISOString()` is used, making it more useful for logging the debug information as shown below:
|
||||
|
||||
<img width="647" src="https://user-images.githubusercontent.com/71256/29091956-6bd78372-7c39-11e7-8c55-c948396d6edd.png">
|
||||
|
||||
|
||||
## Conventions
|
||||
|
||||
If you're using this in one or more of your libraries, you _should_ use the name of your library so that developers may toggle debugging as desired without guessing names. If you have more than one debuggers you _should_ prefix them with your library name and use ":" to separate features. For example "bodyParser" from Connect would then be "connect:bodyParser". If you append a "*" to the end of your name, it will always be enabled regardless of the setting of the DEBUG environment variable. You can then use it for normal output as well as debug output.
|
||||
|
||||
## Wildcards
|
||||
|
||||
The `*` character may be used as a wildcard. Suppose for example your library has
|
||||
debuggers named "connect:bodyParser", "connect:compress", "connect:session",
|
||||
instead of listing all three with
|
||||
`DEBUG=connect:bodyParser,connect:compress,connect:session`, you may simply do
|
||||
`DEBUG=connect:*`, or to run everything using this module simply use `DEBUG=*`.
|
||||
|
||||
You can also exclude specific debuggers by prefixing them with a "-" character.
|
||||
For example, `DEBUG=*,-connect:*` would include all debuggers except those
|
||||
starting with "connect:".
|
||||
|
||||
## Environment Variables
|
||||
|
||||
When running through Node.js, you can set a few environment variables that will
|
||||
change the behavior of the debug logging:
|
||||
|
||||
| Name | Purpose |
|
||||
|-----------|-------------------------------------------------|
|
||||
| `DEBUG` | Enables/disables specific debugging namespaces. |
|
||||
| `DEBUG_HIDE_DATE` | Hide date from debug output (non-TTY). |
|
||||
| `DEBUG_COLORS`| Whether or not to use colors in the debug output. |
|
||||
| `DEBUG_DEPTH` | Object inspection depth. |
|
||||
| `DEBUG_SHOW_HIDDEN` | Shows hidden properties on inspected objects. |
|
||||
|
||||
|
||||
__Note:__ The environment variables beginning with `DEBUG_` end up being
|
||||
converted into an Options object that gets used with `%o`/`%O` formatters.
|
||||
See the Node.js documentation for
|
||||
[`util.inspect()`](https://nodejs.org/api/util.html#util_util_inspect_object_options)
|
||||
for the complete list.
|
||||
|
||||
## Formatters
|
||||
|
||||
Debug uses [printf-style](https://wikipedia.org/wiki/Printf_format_string) formatting.
|
||||
Below are the officially supported formatters:
|
||||
|
||||
| Formatter | Representation |
|
||||
|-----------|----------------|
|
||||
| `%O` | Pretty-print an Object on multiple lines. |
|
||||
| `%o` | Pretty-print an Object all on a single line. |
|
||||
| `%s` | String. |
|
||||
| `%d` | Number (both integer and float). |
|
||||
| `%j` | JSON. Replaced with the string '[Circular]' if the argument contains circular references. |
|
||||
| `%%` | Single percent sign ('%'). This does not consume an argument. |
|
||||
|
||||
|
||||
### Custom formatters
|
||||
|
||||
You can add custom formatters by extending the `debug.formatters` object.
|
||||
For example, if you wanted to add support for rendering a Buffer as hex with
|
||||
`%h`, you could do something like:
|
||||
|
||||
```js
|
||||
const createDebug = require('debug')
|
||||
createDebug.formatters.h = (v) => {
|
||||
return v.toString('hex')
|
||||
}
|
||||
|
||||
// …elsewhere
|
||||
const debug = createDebug('foo')
|
||||
debug('this is hex: %h', new Buffer('hello world'))
|
||||
// foo this is hex: 68656c6c6f20776f726c6421 +0ms
|
||||
```
|
||||
|
||||
|
||||
## Browser Support
|
||||
|
||||
You can build a browser-ready script using [browserify](https://github.com/substack/node-browserify),
|
||||
or just use the [browserify-as-a-service](https://wzrd.in/) [build](https://wzrd.in/standalone/debug@latest),
|
||||
if you don't want to build it yourself.
|
||||
|
||||
Debug's enable state is currently persisted by `localStorage`.
|
||||
Consider the situation shown below where you have `worker:a` and `worker:b`,
|
||||
and wish to debug both. You can enable this using `localStorage.debug`:
|
||||
|
||||
```js
|
||||
localStorage.debug = 'worker:*'
|
||||
```
|
||||
|
||||
And then refresh the page.
|
||||
|
||||
```js
|
||||
a = debug('worker:a');
|
||||
b = debug('worker:b');
|
||||
|
||||
setInterval(function(){
|
||||
a('doing some work');
|
||||
}, 1000);
|
||||
|
||||
setInterval(function(){
|
||||
b('doing some work');
|
||||
}, 1200);
|
||||
```
|
||||
|
||||
In Chromium-based web browsers (e.g. Brave, Chrome, and Electron), the JavaScript console will—by default—only show messages logged by `debug` if the "Verbose" log level is _enabled_.
|
||||
|
||||
<img width="647" src="https://user-images.githubusercontent.com/7143133/152083257-29034707-c42c-4959-8add-3cee850e6fcf.png">
|
||||
|
||||
## Output streams
|
||||
|
||||
By default `debug` will log to stderr, however this can be configured per-namespace by overriding the `log` method:
|
||||
|
||||
Example [_stdout.js_](./examples/node/stdout.js):
|
||||
|
||||
```js
|
||||
var debug = require('debug');
|
||||
var error = debug('app:error');
|
||||
|
||||
// by default stderr is used
|
||||
error('goes to stderr!');
|
||||
|
||||
var log = debug('app:log');
|
||||
// set this namespace to log via console.log
|
||||
log.log = console.log.bind(console); // don't forget to bind to console!
|
||||
log('goes to stdout');
|
||||
error('still goes to stderr!');
|
||||
|
||||
// set all output to go via console.info
|
||||
// overrides all per-namespace log settings
|
||||
debug.log = console.info.bind(console);
|
||||
error('now goes to stdout via console.info');
|
||||
log('still goes to stdout, but via console.info now');
|
||||
```
|
||||
|
||||
## Extend
|
||||
You can simply extend a debugger
|
||||
```js
|
||||
const log = require('debug')('auth');
|
||||
|
||||
//creates new debug instance with extended namespace
|
||||
const logSign = log.extend('sign');
|
||||
const logLogin = log.extend('login');
|
||||
|
||||
log('hello'); // auth hello
|
||||
logSign('hello'); //auth:sign hello
|
||||
logLogin('hello'); //auth:login hello
|
||||
```
|
||||
|
||||
## Set dynamically
|
||||
|
||||
You can also enable debug dynamically by calling the `enable()` method :
|
||||
|
||||
```js
|
||||
let debug = require('debug');
|
||||
|
||||
console.log(1, debug.enabled('test'));
|
||||
|
||||
debug.enable('test');
|
||||
console.log(2, debug.enabled('test'));
|
||||
|
||||
debug.disable();
|
||||
console.log(3, debug.enabled('test'));
|
||||
|
||||
```
|
||||
|
||||
print :
|
||||
```
|
||||
1 false
|
||||
2 true
|
||||
3 false
|
||||
```
|
||||
|
||||
Usage :
|
||||
`enable(namespaces)`
|
||||
`namespaces` can include modes separated by a colon and wildcards.
|
||||
|
||||
Note that calling `enable()` completely overrides previously set DEBUG variable :
|
||||
|
||||
```
|
||||
$ DEBUG=foo node -e 'var dbg = require("debug"); dbg.enable("bar"); console.log(dbg.enabled("foo"))'
|
||||
=> false
|
||||
```
|
||||
|
||||
`disable()`
|
||||
|
||||
Will disable all namespaces. The function returns the namespaces currently
|
||||
enabled (and skipped). This can be useful if you want to disable debugging
|
||||
temporarily without knowing what was enabled to begin with.
|
||||
|
||||
For example:
|
||||
|
||||
```js
|
||||
let debug = require('debug');
|
||||
debug.enable('foo:*,-foo:bar');
|
||||
let namespaces = debug.disable();
|
||||
debug.enable(namespaces);
|
||||
```
|
||||
|
||||
Note: There is no guarantee that the string will be identical to the initial
|
||||
enable string, but semantically they will be identical.
|
||||
|
||||
## Checking whether a debug target is enabled
|
||||
|
||||
After you've created a debug instance, you can determine whether or not it is
|
||||
enabled by checking the `enabled` property:
|
||||
|
||||
```javascript
|
||||
const debug = require('debug')('http');
|
||||
|
||||
if (debug.enabled) {
|
||||
// do stuff...
|
||||
}
|
||||
```
|
||||
|
||||
You can also manually toggle this property to force the debug instance to be
|
||||
enabled or disabled.
|
||||
|
||||
## Usage in child processes
|
||||
|
||||
Due to the way `debug` detects if the output is a TTY or not, colors are not shown in child processes when `stderr` is piped. A solution is to pass the `DEBUG_COLORS=1` environment variable to the child process.
|
||||
For example:
|
||||
|
||||
```javascript
|
||||
worker = fork(WORKER_WRAP_PATH, [workerPath], {
|
||||
stdio: [
|
||||
/* stdin: */ 0,
|
||||
/* stdout: */ 'pipe',
|
||||
/* stderr: */ 'pipe',
|
||||
'ipc',
|
||||
],
|
||||
env: Object.assign({}, process.env, {
|
||||
DEBUG_COLORS: 1 // without this settings, colors won't be shown
|
||||
}),
|
||||
});
|
||||
|
||||
worker.stderr.pipe(process.stderr, { end: false });
|
||||
```
|
||||
|
||||
|
||||
## Authors
|
||||
|
||||
- TJ Holowaychuk
|
||||
- Nathan Rajlich
|
||||
- Andrew Rhyne
|
||||
- Josh Junon
|
||||
|
||||
## Backers
|
||||
|
||||
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/debug#backer)]
|
||||
|
||||
<a href="https://opencollective.com/debug/backer/0/website" target="_blank"><img src="https://opencollective.com/debug/backer/0/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/1/website" target="_blank"><img src="https://opencollective.com/debug/backer/1/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/2/website" target="_blank"><img src="https://opencollective.com/debug/backer/2/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/3/website" target="_blank"><img src="https://opencollective.com/debug/backer/3/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/4/website" target="_blank"><img src="https://opencollective.com/debug/backer/4/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/5/website" target="_blank"><img src="https://opencollective.com/debug/backer/5/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/6/website" target="_blank"><img src="https://opencollective.com/debug/backer/6/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/7/website" target="_blank"><img src="https://opencollective.com/debug/backer/7/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/8/website" target="_blank"><img src="https://opencollective.com/debug/backer/8/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/9/website" target="_blank"><img src="https://opencollective.com/debug/backer/9/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/10/website" target="_blank"><img src="https://opencollective.com/debug/backer/10/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/11/website" target="_blank"><img src="https://opencollective.com/debug/backer/11/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/12/website" target="_blank"><img src="https://opencollective.com/debug/backer/12/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/13/website" target="_blank"><img src="https://opencollective.com/debug/backer/13/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/14/website" target="_blank"><img src="https://opencollective.com/debug/backer/14/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/15/website" target="_blank"><img src="https://opencollective.com/debug/backer/15/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/16/website" target="_blank"><img src="https://opencollective.com/debug/backer/16/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/17/website" target="_blank"><img src="https://opencollective.com/debug/backer/17/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/18/website" target="_blank"><img src="https://opencollective.com/debug/backer/18/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/19/website" target="_blank"><img src="https://opencollective.com/debug/backer/19/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/20/website" target="_blank"><img src="https://opencollective.com/debug/backer/20/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/21/website" target="_blank"><img src="https://opencollective.com/debug/backer/21/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/22/website" target="_blank"><img src="https://opencollective.com/debug/backer/22/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/23/website" target="_blank"><img src="https://opencollective.com/debug/backer/23/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/24/website" target="_blank"><img src="https://opencollective.com/debug/backer/24/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/25/website" target="_blank"><img src="https://opencollective.com/debug/backer/25/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/26/website" target="_blank"><img src="https://opencollective.com/debug/backer/26/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/27/website" target="_blank"><img src="https://opencollective.com/debug/backer/27/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/28/website" target="_blank"><img src="https://opencollective.com/debug/backer/28/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/backer/29/website" target="_blank"><img src="https://opencollective.com/debug/backer/29/avatar.svg"></a>
|
||||
|
||||
|
||||
## Sponsors
|
||||
|
||||
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/debug#sponsor)]
|
||||
|
||||
<a href="https://opencollective.com/debug/sponsor/0/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/0/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/1/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/1/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/2/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/2/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/3/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/3/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/4/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/4/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/5/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/5/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/6/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/6/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/7/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/7/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/8/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/8/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/9/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/9/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/10/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/10/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/11/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/11/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/12/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/12/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/13/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/13/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/14/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/14/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/15/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/15/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/16/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/16/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/17/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/17/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/18/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/18/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/19/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/19/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/20/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/20/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/21/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/21/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/22/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/22/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/23/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/23/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/24/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/24/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/25/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/25/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/26/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/26/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/27/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/27/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/28/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/28/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/debug/sponsor/29/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/29/avatar.svg"></a>
|
||||
|
||||
## License
|
||||
|
||||
(The MIT License)
|
||||
|
||||
Copyright (c) 2014-2017 TJ Holowaychuk <tj@vision-media.ca>
|
||||
Copyright (c) 2018-2021 Josh Junon
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
'Software'), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
64
node_modules/ioredis/node_modules/debug/package.json
generated
vendored
Normal file
64
node_modules/ioredis/node_modules/debug/package.json
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
{
|
||||
"name": "debug",
|
||||
"version": "4.4.3",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git://github.com/debug-js/debug.git"
|
||||
},
|
||||
"description": "Lightweight debugging utility for Node.js and the browser",
|
||||
"keywords": [
|
||||
"debug",
|
||||
"log",
|
||||
"debugger"
|
||||
],
|
||||
"files": [
|
||||
"src",
|
||||
"LICENSE",
|
||||
"README.md"
|
||||
],
|
||||
"author": "Josh Junon (https://github.com/qix-)",
|
||||
"contributors": [
|
||||
"TJ Holowaychuk <tj@vision-media.ca>",
|
||||
"Nathan Rajlich <nathan@tootallnate.net> (http://n8.io)",
|
||||
"Andrew Rhyne <rhyneandrew@gmail.com>"
|
||||
],
|
||||
"license": "MIT",
|
||||
"scripts": {
|
||||
"lint": "xo",
|
||||
"test": "npm run test:node && npm run test:browser && npm run lint",
|
||||
"test:node": "mocha test.js test.node.js",
|
||||
"test:browser": "karma start --single-run",
|
||||
"test:coverage": "cat ./coverage/lcov.info | coveralls"
|
||||
},
|
||||
"dependencies": {
|
||||
"ms": "^2.1.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"brfs": "^2.0.1",
|
||||
"browserify": "^16.2.3",
|
||||
"coveralls": "^3.0.2",
|
||||
"karma": "^3.1.4",
|
||||
"karma-browserify": "^6.0.0",
|
||||
"karma-chrome-launcher": "^2.2.0",
|
||||
"karma-mocha": "^1.3.0",
|
||||
"mocha": "^5.2.0",
|
||||
"mocha-lcov-reporter": "^1.2.0",
|
||||
"sinon": "^14.0.0",
|
||||
"xo": "^0.23.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"supports-color": {
|
||||
"optional": true
|
||||
}
|
||||
},
|
||||
"main": "./src/index.js",
|
||||
"browser": "./src/browser.js",
|
||||
"engines": {
|
||||
"node": ">=6.0"
|
||||
},
|
||||
"xo": {
|
||||
"rules": {
|
||||
"import/extensions": "off"
|
||||
}
|
||||
}
|
||||
}
|
||||
272
node_modules/ioredis/node_modules/debug/src/browser.js
generated
vendored
Normal file
272
node_modules/ioredis/node_modules/debug/src/browser.js
generated
vendored
Normal file
@@ -0,0 +1,272 @@
|
||||
/* eslint-env browser */
|
||||
|
||||
/**
|
||||
* This is the web browser implementation of `debug()`.
|
||||
*/
|
||||
|
||||
exports.formatArgs = formatArgs;
|
||||
exports.save = save;
|
||||
exports.load = load;
|
||||
exports.useColors = useColors;
|
||||
exports.storage = localstorage();
|
||||
exports.destroy = (() => {
|
||||
let warned = false;
|
||||
|
||||
return () => {
|
||||
if (!warned) {
|
||||
warned = true;
|
||||
console.warn('Instance method `debug.destroy()` is deprecated and no longer does anything. It will be removed in the next major version of `debug`.');
|
||||
}
|
||||
};
|
||||
})();
|
||||
|
||||
/**
|
||||
* Colors.
|
||||
*/
|
||||
|
||||
exports.colors = [
|
||||
'#0000CC',
|
||||
'#0000FF',
|
||||
'#0033CC',
|
||||
'#0033FF',
|
||||
'#0066CC',
|
||||
'#0066FF',
|
||||
'#0099CC',
|
||||
'#0099FF',
|
||||
'#00CC00',
|
||||
'#00CC33',
|
||||
'#00CC66',
|
||||
'#00CC99',
|
||||
'#00CCCC',
|
||||
'#00CCFF',
|
||||
'#3300CC',
|
||||
'#3300FF',
|
||||
'#3333CC',
|
||||
'#3333FF',
|
||||
'#3366CC',
|
||||
'#3366FF',
|
||||
'#3399CC',
|
||||
'#3399FF',
|
||||
'#33CC00',
|
||||
'#33CC33',
|
||||
'#33CC66',
|
||||
'#33CC99',
|
||||
'#33CCCC',
|
||||
'#33CCFF',
|
||||
'#6600CC',
|
||||
'#6600FF',
|
||||
'#6633CC',
|
||||
'#6633FF',
|
||||
'#66CC00',
|
||||
'#66CC33',
|
||||
'#9900CC',
|
||||
'#9900FF',
|
||||
'#9933CC',
|
||||
'#9933FF',
|
||||
'#99CC00',
|
||||
'#99CC33',
|
||||
'#CC0000',
|
||||
'#CC0033',
|
||||
'#CC0066',
|
||||
'#CC0099',
|
||||
'#CC00CC',
|
||||
'#CC00FF',
|
||||
'#CC3300',
|
||||
'#CC3333',
|
||||
'#CC3366',
|
||||
'#CC3399',
|
||||
'#CC33CC',
|
||||
'#CC33FF',
|
||||
'#CC6600',
|
||||
'#CC6633',
|
||||
'#CC9900',
|
||||
'#CC9933',
|
||||
'#CCCC00',
|
||||
'#CCCC33',
|
||||
'#FF0000',
|
||||
'#FF0033',
|
||||
'#FF0066',
|
||||
'#FF0099',
|
||||
'#FF00CC',
|
||||
'#FF00FF',
|
||||
'#FF3300',
|
||||
'#FF3333',
|
||||
'#FF3366',
|
||||
'#FF3399',
|
||||
'#FF33CC',
|
||||
'#FF33FF',
|
||||
'#FF6600',
|
||||
'#FF6633',
|
||||
'#FF9900',
|
||||
'#FF9933',
|
||||
'#FFCC00',
|
||||
'#FFCC33'
|
||||
];
|
||||
|
||||
/**
|
||||
* Currently only WebKit-based Web Inspectors, Firefox >= v31,
|
||||
* and the Firebug extension (any Firefox version) are known
|
||||
* to support "%c" CSS customizations.
|
||||
*
|
||||
* TODO: add a `localStorage` variable to explicitly enable/disable colors
|
||||
*/
|
||||
|
||||
// eslint-disable-next-line complexity
|
||||
function useColors() {
|
||||
// NB: In an Electron preload script, document will be defined but not fully
|
||||
// initialized. Since we know we're in Chrome, we'll just detect this case
|
||||
// explicitly
|
||||
if (typeof window !== 'undefined' && window.process && (window.process.type === 'renderer' || window.process.__nwjs)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Internet Explorer and Edge do not support colors.
|
||||
if (typeof navigator !== 'undefined' && navigator.userAgent && navigator.userAgent.toLowerCase().match(/(edge|trident)\/(\d+)/)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let m;
|
||||
|
||||
// Is webkit? http://stackoverflow.com/a/16459606/376773
|
||||
// document is undefined in react-native: https://github.com/facebook/react-native/pull/1632
|
||||
// eslint-disable-next-line no-return-assign
|
||||
return (typeof document !== 'undefined' && document.documentElement && document.documentElement.style && document.documentElement.style.WebkitAppearance) ||
|
||||
// Is firebug? http://stackoverflow.com/a/398120/376773
|
||||
(typeof window !== 'undefined' && window.console && (window.console.firebug || (window.console.exception && window.console.table))) ||
|
||||
// Is firefox >= v31?
|
||||
// https://developer.mozilla.org/en-US/docs/Tools/Web_Console#Styling_messages
|
||||
(typeof navigator !== 'undefined' && navigator.userAgent && (m = navigator.userAgent.toLowerCase().match(/firefox\/(\d+)/)) && parseInt(m[1], 10) >= 31) ||
|
||||
// Double check webkit in userAgent just in case we are in a worker
|
||||
(typeof navigator !== 'undefined' && navigator.userAgent && navigator.userAgent.toLowerCase().match(/applewebkit\/(\d+)/));
|
||||
}
|
||||
|
||||
/**
|
||||
* Colorize log arguments if enabled.
|
||||
*
|
||||
* @api public
|
||||
*/
|
||||
|
||||
function formatArgs(args) {
|
||||
args[0] = (this.useColors ? '%c' : '') +
|
||||
this.namespace +
|
||||
(this.useColors ? ' %c' : ' ') +
|
||||
args[0] +
|
||||
(this.useColors ? '%c ' : ' ') +
|
||||
'+' + module.exports.humanize(this.diff);
|
||||
|
||||
if (!this.useColors) {
|
||||
return;
|
||||
}
|
||||
|
||||
const c = 'color: ' + this.color;
|
||||
args.splice(1, 0, c, 'color: inherit');
|
||||
|
||||
// The final "%c" is somewhat tricky, because there could be other
|
||||
// arguments passed either before or after the %c, so we need to
|
||||
// figure out the correct index to insert the CSS into
|
||||
let index = 0;
|
||||
let lastC = 0;
|
||||
args[0].replace(/%[a-zA-Z%]/g, match => {
|
||||
if (match === '%%') {
|
||||
return;
|
||||
}
|
||||
index++;
|
||||
if (match === '%c') {
|
||||
// We only are interested in the *last* %c
|
||||
// (the user may have provided their own)
|
||||
lastC = index;
|
||||
}
|
||||
});
|
||||
|
||||
args.splice(lastC, 0, c);
|
||||
}
|
||||
|
||||
/**
|
||||
* Invokes `console.debug()` when available.
|
||||
* No-op when `console.debug` is not a "function".
|
||||
* If `console.debug` is not available, falls back
|
||||
* to `console.log`.
|
||||
*
|
||||
* @api public
|
||||
*/
|
||||
exports.log = console.debug || console.log || (() => {});
|
||||
|
||||
/**
|
||||
* Save `namespaces`.
|
||||
*
|
||||
* @param {String} namespaces
|
||||
* @api private
|
||||
*/
|
||||
function save(namespaces) {
|
||||
try {
|
||||
if (namespaces) {
|
||||
exports.storage.setItem('debug', namespaces);
|
||||
} else {
|
||||
exports.storage.removeItem('debug');
|
||||
}
|
||||
} catch (error) {
|
||||
// Swallow
|
||||
// XXX (@Qix-) should we be logging these?
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Load `namespaces`.
|
||||
*
|
||||
* @return {String} returns the previously persisted debug modes
|
||||
* @api private
|
||||
*/
|
||||
function load() {
|
||||
let r;
|
||||
try {
|
||||
r = exports.storage.getItem('debug') || exports.storage.getItem('DEBUG') ;
|
||||
} catch (error) {
|
||||
// Swallow
|
||||
// XXX (@Qix-) should we be logging these?
|
||||
}
|
||||
|
||||
// If debug isn't set in LS, and we're in Electron, try to load $DEBUG
|
||||
if (!r && typeof process !== 'undefined' && 'env' in process) {
|
||||
r = process.env.DEBUG;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* Localstorage attempts to return the localstorage.
|
||||
*
|
||||
* This is necessary because safari throws
|
||||
* when a user disables cookies/localstorage
|
||||
* and you attempt to access it.
|
||||
*
|
||||
* @return {LocalStorage}
|
||||
* @api private
|
||||
*/
|
||||
|
||||
function localstorage() {
|
||||
try {
|
||||
// TVMLKit (Apple TV JS Runtime) does not have a window object, just localStorage in the global context
|
||||
// The Browser also has localStorage in the global context.
|
||||
return localStorage;
|
||||
} catch (error) {
|
||||
// Swallow
|
||||
// XXX (@Qix-) should we be logging these?
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = require('./common')(exports);
|
||||
|
||||
const {formatters} = module.exports;
|
||||
|
||||
/**
|
||||
* Map %j to `JSON.stringify()`, since no Web Inspectors do that by default.
|
||||
*/
|
||||
|
||||
formatters.j = function (v) {
|
||||
try {
|
||||
return JSON.stringify(v);
|
||||
} catch (error) {
|
||||
return '[UnexpectedJSONParseError]: ' + error.message;
|
||||
}
|
||||
};
|
||||
292
node_modules/ioredis/node_modules/debug/src/common.js
generated
vendored
Normal file
292
node_modules/ioredis/node_modules/debug/src/common.js
generated
vendored
Normal file
@@ -0,0 +1,292 @@
|
||||
|
||||
/**
|
||||
* This is the common logic for both the Node.js and web browser
|
||||
* implementations of `debug()`.
|
||||
*/
|
||||
|
||||
function setup(env) {
|
||||
createDebug.debug = createDebug;
|
||||
createDebug.default = createDebug;
|
||||
createDebug.coerce = coerce;
|
||||
createDebug.disable = disable;
|
||||
createDebug.enable = enable;
|
||||
createDebug.enabled = enabled;
|
||||
createDebug.humanize = require('ms');
|
||||
createDebug.destroy = destroy;
|
||||
|
||||
Object.keys(env).forEach(key => {
|
||||
createDebug[key] = env[key];
|
||||
});
|
||||
|
||||
/**
|
||||
* The currently active debug mode names, and names to skip.
|
||||
*/
|
||||
|
||||
createDebug.names = [];
|
||||
createDebug.skips = [];
|
||||
|
||||
/**
|
||||
* Map of special "%n" handling functions, for the debug "format" argument.
|
||||
*
|
||||
* Valid key names are a single, lower or upper-case letter, i.e. "n" and "N".
|
||||
*/
|
||||
createDebug.formatters = {};
|
||||
|
||||
/**
|
||||
* Selects a color for a debug namespace
|
||||
* @param {String} namespace The namespace string for the debug instance to be colored
|
||||
* @return {Number|String} An ANSI color code for the given namespace
|
||||
* @api private
|
||||
*/
|
||||
function selectColor(namespace) {
|
||||
let hash = 0;
|
||||
|
||||
for (let i = 0; i < namespace.length; i++) {
|
||||
hash = ((hash << 5) - hash) + namespace.charCodeAt(i);
|
||||
hash |= 0; // Convert to 32bit integer
|
||||
}
|
||||
|
||||
return createDebug.colors[Math.abs(hash) % createDebug.colors.length];
|
||||
}
|
||||
createDebug.selectColor = selectColor;
|
||||
|
||||
/**
|
||||
* Create a debugger with the given `namespace`.
|
||||
*
|
||||
* @param {String} namespace
|
||||
* @return {Function}
|
||||
* @api public
|
||||
*/
|
||||
function createDebug(namespace) {
|
||||
let prevTime;
|
||||
let enableOverride = null;
|
||||
let namespacesCache;
|
||||
let enabledCache;
|
||||
|
||||
function debug(...args) {
|
||||
// Disabled?
|
||||
if (!debug.enabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
const self = debug;
|
||||
|
||||
// Set `diff` timestamp
|
||||
const curr = Number(new Date());
|
||||
const ms = curr - (prevTime || curr);
|
||||
self.diff = ms;
|
||||
self.prev = prevTime;
|
||||
self.curr = curr;
|
||||
prevTime = curr;
|
||||
|
||||
args[0] = createDebug.coerce(args[0]);
|
||||
|
||||
if (typeof args[0] !== 'string') {
|
||||
// Anything else let's inspect with %O
|
||||
args.unshift('%O');
|
||||
}
|
||||
|
||||
// Apply any `formatters` transformations
|
||||
let index = 0;
|
||||
args[0] = args[0].replace(/%([a-zA-Z%])/g, (match, format) => {
|
||||
// If we encounter an escaped % then don't increase the array index
|
||||
if (match === '%%') {
|
||||
return '%';
|
||||
}
|
||||
index++;
|
||||
const formatter = createDebug.formatters[format];
|
||||
if (typeof formatter === 'function') {
|
||||
const val = args[index];
|
||||
match = formatter.call(self, val);
|
||||
|
||||
// Now we need to remove `args[index]` since it's inlined in the `format`
|
||||
args.splice(index, 1);
|
||||
index--;
|
||||
}
|
||||
return match;
|
||||
});
|
||||
|
||||
// Apply env-specific formatting (colors, etc.)
|
||||
createDebug.formatArgs.call(self, args);
|
||||
|
||||
const logFn = self.log || createDebug.log;
|
||||
logFn.apply(self, args);
|
||||
}
|
||||
|
||||
debug.namespace = namespace;
|
||||
debug.useColors = createDebug.useColors();
|
||||
debug.color = createDebug.selectColor(namespace);
|
||||
debug.extend = extend;
|
||||
debug.destroy = createDebug.destroy; // XXX Temporary. Will be removed in the next major release.
|
||||
|
||||
Object.defineProperty(debug, 'enabled', {
|
||||
enumerable: true,
|
||||
configurable: false,
|
||||
get: () => {
|
||||
if (enableOverride !== null) {
|
||||
return enableOverride;
|
||||
}
|
||||
if (namespacesCache !== createDebug.namespaces) {
|
||||
namespacesCache = createDebug.namespaces;
|
||||
enabledCache = createDebug.enabled(namespace);
|
||||
}
|
||||
|
||||
return enabledCache;
|
||||
},
|
||||
set: v => {
|
||||
enableOverride = v;
|
||||
}
|
||||
});
|
||||
|
||||
// Env-specific initialization logic for debug instances
|
||||
if (typeof createDebug.init === 'function') {
|
||||
createDebug.init(debug);
|
||||
}
|
||||
|
||||
return debug;
|
||||
}
|
||||
|
||||
function extend(namespace, delimiter) {
|
||||
const newDebug = createDebug(this.namespace + (typeof delimiter === 'undefined' ? ':' : delimiter) + namespace);
|
||||
newDebug.log = this.log;
|
||||
return newDebug;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enables a debug mode by namespaces. This can include modes
|
||||
* separated by a colon and wildcards.
|
||||
*
|
||||
* @param {String} namespaces
|
||||
* @api public
|
||||
*/
|
||||
function enable(namespaces) {
|
||||
createDebug.save(namespaces);
|
||||
createDebug.namespaces = namespaces;
|
||||
|
||||
createDebug.names = [];
|
||||
createDebug.skips = [];
|
||||
|
||||
const split = (typeof namespaces === 'string' ? namespaces : '')
|
||||
.trim()
|
||||
.replace(/\s+/g, ',')
|
||||
.split(',')
|
||||
.filter(Boolean);
|
||||
|
||||
for (const ns of split) {
|
||||
if (ns[0] === '-') {
|
||||
createDebug.skips.push(ns.slice(1));
|
||||
} else {
|
||||
createDebug.names.push(ns);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the given string matches a namespace template, honoring
|
||||
* asterisks as wildcards.
|
||||
*
|
||||
* @param {String} search
|
||||
* @param {String} template
|
||||
* @return {Boolean}
|
||||
*/
|
||||
function matchesTemplate(search, template) {
|
||||
let searchIndex = 0;
|
||||
let templateIndex = 0;
|
||||
let starIndex = -1;
|
||||
let matchIndex = 0;
|
||||
|
||||
while (searchIndex < search.length) {
|
||||
if (templateIndex < template.length && (template[templateIndex] === search[searchIndex] || template[templateIndex] === '*')) {
|
||||
// Match character or proceed with wildcard
|
||||
if (template[templateIndex] === '*') {
|
||||
starIndex = templateIndex;
|
||||
matchIndex = searchIndex;
|
||||
templateIndex++; // Skip the '*'
|
||||
} else {
|
||||
searchIndex++;
|
||||
templateIndex++;
|
||||
}
|
||||
} else if (starIndex !== -1) { // eslint-disable-line no-negated-condition
|
||||
// Backtrack to the last '*' and try to match more characters
|
||||
templateIndex = starIndex + 1;
|
||||
matchIndex++;
|
||||
searchIndex = matchIndex;
|
||||
} else {
|
||||
return false; // No match
|
||||
}
|
||||
}
|
||||
|
||||
// Handle trailing '*' in template
|
||||
while (templateIndex < template.length && template[templateIndex] === '*') {
|
||||
templateIndex++;
|
||||
}
|
||||
|
||||
return templateIndex === template.length;
|
||||
}
|
||||
|
||||
/**
|
||||
* Disable debug output.
|
||||
*
|
||||
* @return {String} namespaces
|
||||
* @api public
|
||||
*/
|
||||
function disable() {
|
||||
const namespaces = [
|
||||
...createDebug.names,
|
||||
...createDebug.skips.map(namespace => '-' + namespace)
|
||||
].join(',');
|
||||
createDebug.enable('');
|
||||
return namespaces;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the given mode name is enabled, false otherwise.
|
||||
*
|
||||
* @param {String} name
|
||||
* @return {Boolean}
|
||||
* @api public
|
||||
*/
|
||||
function enabled(name) {
|
||||
for (const skip of createDebug.skips) {
|
||||
if (matchesTemplate(name, skip)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
for (const ns of createDebug.names) {
|
||||
if (matchesTemplate(name, ns)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Coerce `val`.
|
||||
*
|
||||
* @param {Mixed} val
|
||||
* @return {Mixed}
|
||||
* @api private
|
||||
*/
|
||||
function coerce(val) {
|
||||
if (val instanceof Error) {
|
||||
return val.stack || val.message;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
/**
|
||||
* XXX DO NOT USE. This is a temporary stub function.
|
||||
* XXX It WILL be removed in the next major release.
|
||||
*/
|
||||
function destroy() {
|
||||
console.warn('Instance method `debug.destroy()` is deprecated and no longer does anything. It will be removed in the next major version of `debug`.');
|
||||
}
|
||||
|
||||
createDebug.enable(createDebug.load());
|
||||
|
||||
return createDebug;
|
||||
}
|
||||
|
||||
module.exports = setup;
|
||||
10
node_modules/ioredis/node_modules/debug/src/index.js
generated
vendored
Normal file
10
node_modules/ioredis/node_modules/debug/src/index.js
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
/**
|
||||
* Detect Electron renderer / nwjs process, which is node, but we should
|
||||
* treat as a browser.
|
||||
*/
|
||||
|
||||
if (typeof process === 'undefined' || process.type === 'renderer' || process.browser === true || process.__nwjs) {
|
||||
module.exports = require('./browser.js');
|
||||
} else {
|
||||
module.exports = require('./node.js');
|
||||
}
|
||||
263
node_modules/ioredis/node_modules/debug/src/node.js
generated
vendored
Normal file
263
node_modules/ioredis/node_modules/debug/src/node.js
generated
vendored
Normal file
@@ -0,0 +1,263 @@
|
||||
/**
|
||||
* Module dependencies.
|
||||
*/
|
||||
|
||||
const tty = require('tty');
|
||||
const util = require('util');
|
||||
|
||||
/**
|
||||
* This is the Node.js implementation of `debug()`.
|
||||
*/
|
||||
|
||||
exports.init = init;
|
||||
exports.log = log;
|
||||
exports.formatArgs = formatArgs;
|
||||
exports.save = save;
|
||||
exports.load = load;
|
||||
exports.useColors = useColors;
|
||||
exports.destroy = util.deprecate(
|
||||
() => {},
|
||||
'Instance method `debug.destroy()` is deprecated and no longer does anything. It will be removed in the next major version of `debug`.'
|
||||
);
|
||||
|
||||
/**
|
||||
* Colors.
|
||||
*/
|
||||
|
||||
exports.colors = [6, 2, 3, 4, 5, 1];
|
||||
|
||||
try {
|
||||
// Optional dependency (as in, doesn't need to be installed, NOT like optionalDependencies in package.json)
|
||||
// eslint-disable-next-line import/no-extraneous-dependencies
|
||||
const supportsColor = require('supports-color');
|
||||
|
||||
if (supportsColor && (supportsColor.stderr || supportsColor).level >= 2) {
|
||||
exports.colors = [
|
||||
20,
|
||||
21,
|
||||
26,
|
||||
27,
|
||||
32,
|
||||
33,
|
||||
38,
|
||||
39,
|
||||
40,
|
||||
41,
|
||||
42,
|
||||
43,
|
||||
44,
|
||||
45,
|
||||
56,
|
||||
57,
|
||||
62,
|
||||
63,
|
||||
68,
|
||||
69,
|
||||
74,
|
||||
75,
|
||||
76,
|
||||
77,
|
||||
78,
|
||||
79,
|
||||
80,
|
||||
81,
|
||||
92,
|
||||
93,
|
||||
98,
|
||||
99,
|
||||
112,
|
||||
113,
|
||||
128,
|
||||
129,
|
||||
134,
|
||||
135,
|
||||
148,
|
||||
149,
|
||||
160,
|
||||
161,
|
||||
162,
|
||||
163,
|
||||
164,
|
||||
165,
|
||||
166,
|
||||
167,
|
||||
168,
|
||||
169,
|
||||
170,
|
||||
171,
|
||||
172,
|
||||
173,
|
||||
178,
|
||||
179,
|
||||
184,
|
||||
185,
|
||||
196,
|
||||
197,
|
||||
198,
|
||||
199,
|
||||
200,
|
||||
201,
|
||||
202,
|
||||
203,
|
||||
204,
|
||||
205,
|
||||
206,
|
||||
207,
|
||||
208,
|
||||
209,
|
||||
214,
|
||||
215,
|
||||
220,
|
||||
221
|
||||
];
|
||||
}
|
||||
} catch (error) {
|
||||
// Swallow - we only care if `supports-color` is available; it doesn't have to be.
|
||||
}
|
||||
|
||||
/**
|
||||
* Build up the default `inspectOpts` object from the environment variables.
|
||||
*
|
||||
* $ DEBUG_COLORS=no DEBUG_DEPTH=10 DEBUG_SHOW_HIDDEN=enabled node script.js
|
||||
*/
|
||||
|
||||
exports.inspectOpts = Object.keys(process.env).filter(key => {
|
||||
return /^debug_/i.test(key);
|
||||
}).reduce((obj, key) => {
|
||||
// Camel-case
|
||||
const prop = key
|
||||
.substring(6)
|
||||
.toLowerCase()
|
||||
.replace(/_([a-z])/g, (_, k) => {
|
||||
return k.toUpperCase();
|
||||
});
|
||||
|
||||
// Coerce string value into JS value
|
||||
let val = process.env[key];
|
||||
if (/^(yes|on|true|enabled)$/i.test(val)) {
|
||||
val = true;
|
||||
} else if (/^(no|off|false|disabled)$/i.test(val)) {
|
||||
val = false;
|
||||
} else if (val === 'null') {
|
||||
val = null;
|
||||
} else {
|
||||
val = Number(val);
|
||||
}
|
||||
|
||||
obj[prop] = val;
|
||||
return obj;
|
||||
}, {});
|
||||
|
||||
/**
|
||||
* Is stdout a TTY? Colored output is enabled when `true`.
|
||||
*/
|
||||
|
||||
function useColors() {
|
||||
return 'colors' in exports.inspectOpts ?
|
||||
Boolean(exports.inspectOpts.colors) :
|
||||
tty.isatty(process.stderr.fd);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds ANSI color escape codes if enabled.
|
||||
*
|
||||
* @api public
|
||||
*/
|
||||
|
||||
function formatArgs(args) {
|
||||
const {namespace: name, useColors} = this;
|
||||
|
||||
if (useColors) {
|
||||
const c = this.color;
|
||||
const colorCode = '\u001B[3' + (c < 8 ? c : '8;5;' + c);
|
||||
const prefix = ` ${colorCode};1m${name} \u001B[0m`;
|
||||
|
||||
args[0] = prefix + args[0].split('\n').join('\n' + prefix);
|
||||
args.push(colorCode + 'm+' + module.exports.humanize(this.diff) + '\u001B[0m');
|
||||
} else {
|
||||
args[0] = getDate() + name + ' ' + args[0];
|
||||
}
|
||||
}
|
||||
|
||||
function getDate() {
|
||||
if (exports.inspectOpts.hideDate) {
|
||||
return '';
|
||||
}
|
||||
return new Date().toISOString() + ' ';
|
||||
}
|
||||
|
||||
/**
|
||||
* Invokes `util.formatWithOptions()` with the specified arguments and writes to stderr.
|
||||
*/
|
||||
|
||||
function log(...args) {
|
||||
return process.stderr.write(util.formatWithOptions(exports.inspectOpts, ...args) + '\n');
|
||||
}
|
||||
|
||||
/**
|
||||
* Save `namespaces`.
|
||||
*
|
||||
* @param {String} namespaces
|
||||
* @api private
|
||||
*/
|
||||
function save(namespaces) {
|
||||
if (namespaces) {
|
||||
process.env.DEBUG = namespaces;
|
||||
} else {
|
||||
// If you set a process.env field to null or undefined, it gets cast to the
|
||||
// string 'null' or 'undefined'. Just delete instead.
|
||||
delete process.env.DEBUG;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Load `namespaces`.
|
||||
*
|
||||
* @return {String} returns the previously persisted debug modes
|
||||
* @api private
|
||||
*/
|
||||
|
||||
function load() {
|
||||
return process.env.DEBUG;
|
||||
}
|
||||
|
||||
/**
|
||||
* Init logic for `debug` instances.
|
||||
*
|
||||
* Create a new `inspectOpts` object in case `useColors` is set
|
||||
* differently for a particular `debug` instance.
|
||||
*/
|
||||
|
||||
function init(debug) {
|
||||
debug.inspectOpts = {};
|
||||
|
||||
const keys = Object.keys(exports.inspectOpts);
|
||||
for (let i = 0; i < keys.length; i++) {
|
||||
debug.inspectOpts[keys[i]] = exports.inspectOpts[keys[i]];
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = require('./common')(exports);
|
||||
|
||||
const {formatters} = module.exports;
|
||||
|
||||
/**
|
||||
* Map %o to `util.inspect()`, all on a single line.
|
||||
*/
|
||||
|
||||
formatters.o = function (v) {
|
||||
this.inspectOpts.colors = this.useColors;
|
||||
return util.inspect(v, this.inspectOpts)
|
||||
.split('\n')
|
||||
.map(str => str.trim())
|
||||
.join(' ');
|
||||
};
|
||||
|
||||
/**
|
||||
* Map %O to `util.inspect()`, allowing multiple lines if needed.
|
||||
*/
|
||||
|
||||
formatters.O = function (v) {
|
||||
this.inspectOpts.colors = this.useColors;
|
||||
return util.inspect(v, this.inspectOpts);
|
||||
};
|
||||
162
node_modules/ioredis/node_modules/ms/index.js
generated
vendored
Normal file
162
node_modules/ioredis/node_modules/ms/index.js
generated
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
/**
|
||||
* Helpers.
|
||||
*/
|
||||
|
||||
var s = 1000;
|
||||
var m = s * 60;
|
||||
var h = m * 60;
|
||||
var d = h * 24;
|
||||
var w = d * 7;
|
||||
var y = d * 365.25;
|
||||
|
||||
/**
|
||||
* Parse or format the given `val`.
|
||||
*
|
||||
* Options:
|
||||
*
|
||||
* - `long` verbose formatting [false]
|
||||
*
|
||||
* @param {String|Number} val
|
||||
* @param {Object} [options]
|
||||
* @throws {Error} throw an error if val is not a non-empty string or a number
|
||||
* @return {String|Number}
|
||||
* @api public
|
||||
*/
|
||||
|
||||
module.exports = function (val, options) {
|
||||
options = options || {};
|
||||
var type = typeof val;
|
||||
if (type === 'string' && val.length > 0) {
|
||||
return parse(val);
|
||||
} else if (type === 'number' && isFinite(val)) {
|
||||
return options.long ? fmtLong(val) : fmtShort(val);
|
||||
}
|
||||
throw new Error(
|
||||
'val is not a non-empty string or a valid number. val=' +
|
||||
JSON.stringify(val)
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* Parse the given `str` and return milliseconds.
|
||||
*
|
||||
* @param {String} str
|
||||
* @return {Number}
|
||||
* @api private
|
||||
*/
|
||||
|
||||
function parse(str) {
|
||||
str = String(str);
|
||||
if (str.length > 100) {
|
||||
return;
|
||||
}
|
||||
var match = /^(-?(?:\d+)?\.?\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|weeks?|w|years?|yrs?|y)?$/i.exec(
|
||||
str
|
||||
);
|
||||
if (!match) {
|
||||
return;
|
||||
}
|
||||
var n = parseFloat(match[1]);
|
||||
var type = (match[2] || 'ms').toLowerCase();
|
||||
switch (type) {
|
||||
case 'years':
|
||||
case 'year':
|
||||
case 'yrs':
|
||||
case 'yr':
|
||||
case 'y':
|
||||
return n * y;
|
||||
case 'weeks':
|
||||
case 'week':
|
||||
case 'w':
|
||||
return n * w;
|
||||
case 'days':
|
||||
case 'day':
|
||||
case 'd':
|
||||
return n * d;
|
||||
case 'hours':
|
||||
case 'hour':
|
||||
case 'hrs':
|
||||
case 'hr':
|
||||
case 'h':
|
||||
return n * h;
|
||||
case 'minutes':
|
||||
case 'minute':
|
||||
case 'mins':
|
||||
case 'min':
|
||||
case 'm':
|
||||
return n * m;
|
||||
case 'seconds':
|
||||
case 'second':
|
||||
case 'secs':
|
||||
case 'sec':
|
||||
case 's':
|
||||
return n * s;
|
||||
case 'milliseconds':
|
||||
case 'millisecond':
|
||||
case 'msecs':
|
||||
case 'msec':
|
||||
case 'ms':
|
||||
return n;
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Short format for `ms`.
|
||||
*
|
||||
* @param {Number} ms
|
||||
* @return {String}
|
||||
* @api private
|
||||
*/
|
||||
|
||||
function fmtShort(ms) {
|
||||
var msAbs = Math.abs(ms);
|
||||
if (msAbs >= d) {
|
||||
return Math.round(ms / d) + 'd';
|
||||
}
|
||||
if (msAbs >= h) {
|
||||
return Math.round(ms / h) + 'h';
|
||||
}
|
||||
if (msAbs >= m) {
|
||||
return Math.round(ms / m) + 'm';
|
||||
}
|
||||
if (msAbs >= s) {
|
||||
return Math.round(ms / s) + 's';
|
||||
}
|
||||
return ms + 'ms';
|
||||
}
|
||||
|
||||
/**
|
||||
* Long format for `ms`.
|
||||
*
|
||||
* @param {Number} ms
|
||||
* @return {String}
|
||||
* @api private
|
||||
*/
|
||||
|
||||
function fmtLong(ms) {
|
||||
var msAbs = Math.abs(ms);
|
||||
if (msAbs >= d) {
|
||||
return plural(ms, msAbs, d, 'day');
|
||||
}
|
||||
if (msAbs >= h) {
|
||||
return plural(ms, msAbs, h, 'hour');
|
||||
}
|
||||
if (msAbs >= m) {
|
||||
return plural(ms, msAbs, m, 'minute');
|
||||
}
|
||||
if (msAbs >= s) {
|
||||
return plural(ms, msAbs, s, 'second');
|
||||
}
|
||||
return ms + ' ms';
|
||||
}
|
||||
|
||||
/**
|
||||
* Pluralization helper.
|
||||
*/
|
||||
|
||||
function plural(ms, msAbs, n, name) {
|
||||
var isPlural = msAbs >= n * 1.5;
|
||||
return Math.round(ms / n) + ' ' + name + (isPlural ? 's' : '');
|
||||
}
|
||||
21
node_modules/ioredis/node_modules/ms/license.md
generated
vendored
Normal file
21
node_modules/ioredis/node_modules/ms/license.md
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2020 Vercel, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
38
node_modules/ioredis/node_modules/ms/package.json
generated
vendored
Normal file
38
node_modules/ioredis/node_modules/ms/package.json
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
{
|
||||
"name": "ms",
|
||||
"version": "2.1.3",
|
||||
"description": "Tiny millisecond conversion utility",
|
||||
"repository": "vercel/ms",
|
||||
"main": "./index",
|
||||
"files": [
|
||||
"index.js"
|
||||
],
|
||||
"scripts": {
|
||||
"precommit": "lint-staged",
|
||||
"lint": "eslint lib/* bin/*",
|
||||
"test": "mocha tests.js"
|
||||
},
|
||||
"eslintConfig": {
|
||||
"extends": "eslint:recommended",
|
||||
"env": {
|
||||
"node": true,
|
||||
"es6": true
|
||||
}
|
||||
},
|
||||
"lint-staged": {
|
||||
"*.js": [
|
||||
"npm run lint",
|
||||
"prettier --single-quote --write",
|
||||
"git add"
|
||||
]
|
||||
},
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"eslint": "4.18.2",
|
||||
"expect.js": "0.3.1",
|
||||
"husky": "0.14.3",
|
||||
"lint-staged": "5.0.0",
|
||||
"mocha": "4.0.1",
|
||||
"prettier": "2.0.5"
|
||||
}
|
||||
}
|
||||
59
node_modules/ioredis/node_modules/ms/readme.md
generated
vendored
Normal file
59
node_modules/ioredis/node_modules/ms/readme.md
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
# ms
|
||||
|
||||

|
||||
|
||||
Use this package to easily convert various time formats to milliseconds.
|
||||
|
||||
## Examples
|
||||
|
||||
```js
|
||||
ms('2 days') // 172800000
|
||||
ms('1d') // 86400000
|
||||
ms('10h') // 36000000
|
||||
ms('2.5 hrs') // 9000000
|
||||
ms('2h') // 7200000
|
||||
ms('1m') // 60000
|
||||
ms('5s') // 5000
|
||||
ms('1y') // 31557600000
|
||||
ms('100') // 100
|
||||
ms('-3 days') // -259200000
|
||||
ms('-1h') // -3600000
|
||||
ms('-200') // -200
|
||||
```
|
||||
|
||||
### Convert from Milliseconds
|
||||
|
||||
```js
|
||||
ms(60000) // "1m"
|
||||
ms(2 * 60000) // "2m"
|
||||
ms(-3 * 60000) // "-3m"
|
||||
ms(ms('10 hours')) // "10h"
|
||||
```
|
||||
|
||||
### Time Format Written-Out
|
||||
|
||||
```js
|
||||
ms(60000, { long: true }) // "1 minute"
|
||||
ms(2 * 60000, { long: true }) // "2 minutes"
|
||||
ms(-3 * 60000, { long: true }) // "-3 minutes"
|
||||
ms(ms('10 hours'), { long: true }) // "10 hours"
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- Works both in [Node.js](https://nodejs.org) and in the browser
|
||||
- If a number is supplied to `ms`, a string with a unit is returned
|
||||
- If a string that contains the number is supplied, it returns it as a number (e.g.: it returns `100` for `'100'`)
|
||||
- If you pass a string with a number and a valid unit, the number of equivalent milliseconds is returned
|
||||
|
||||
## Related Packages
|
||||
|
||||
- [ms.macro](https://github.com/knpwrs/ms.macro) - Run `ms` as a macro at build-time.
|
||||
|
||||
## Caught a Bug?
|
||||
|
||||
1. [Fork](https://help.github.com/articles/fork-a-repo/) this repository to your own GitHub account and then [clone](https://help.github.com/articles/cloning-a-repository/) it to your local device
|
||||
2. Link the package to the global module directory: `npm link`
|
||||
3. Within the module you want to test your local development instance of ms, just link it to the dependencies: `npm link ms`. Instead of the default one from npm, Node.js will now use your clone of ms!
|
||||
|
||||
As always, you can run the tests using: `npm test`
|
||||
102
node_modules/ioredis/package.json
generated
vendored
Normal file
102
node_modules/ioredis/package.json
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
{
|
||||
"name": "ioredis",
|
||||
"version": "5.10.1",
|
||||
"description": "A robust, performance-focused and full-featured Redis client for Node.js.",
|
||||
"main": "./built/index.js",
|
||||
"types": "./built/index.d.ts",
|
||||
"files": [
|
||||
"built/"
|
||||
],
|
||||
"scripts": {
|
||||
"docker:setup": "docker compose -f test/docker-compose.yml up -d --wait",
|
||||
"docker:teardown": "docker compose -f test/docker-compose.yml down --volumes --remove-orphans",
|
||||
"test:tsd": "npm run build && tsd",
|
||||
"test:js": "TS_NODE_TRANSPILE_ONLY=true NODE_ENV=test mocha --no-experimental-strip-types \"test/helpers/*.ts\" \"test/unit/**/*.ts\" \"test/functional/**/*.ts\"",
|
||||
"test:cov": "nyc npm run test:js",
|
||||
"test:cluster": "TS_NODE_TRANSPILE_ONLY=true NODE_ENV=test mocha --no-experimental-strip-types \"test/cluster/**/*.ts\"",
|
||||
"test": "npm run test:js && npm run test:tsd",
|
||||
"lint": "eslint --ext .js,.ts ./lib",
|
||||
"docs": "npx typedoc --logLevel Error --excludeExternals --excludeProtected --excludePrivate --readme none lib/index.ts",
|
||||
"format": "prettier --write \"{,!(node_modules)/**/}*.{js,ts}\"",
|
||||
"format-check": "prettier --check \"{,!(node_modules)/**/}*.{js,ts}\"",
|
||||
"build": "rm -rf built && tsc",
|
||||
"prepublishOnly": "npm run build",
|
||||
"semantic-release": "semantic-release"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git://github.com/luin/ioredis.git"
|
||||
},
|
||||
"keywords": [
|
||||
"redis",
|
||||
"cluster",
|
||||
"sentinel",
|
||||
"pipelining"
|
||||
],
|
||||
"tsd": {
|
||||
"directory": "test/typing"
|
||||
},
|
||||
"author": "Zihua Li <i@zihua.li> (http://zihua.li)",
|
||||
"license": "MIT",
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/ioredis"
|
||||
},
|
||||
"dependencies": {
|
||||
"@ioredis/commands": "1.5.1",
|
||||
"cluster-key-slot": "^1.1.0",
|
||||
"debug": "^4.3.4",
|
||||
"denque": "^2.1.0",
|
||||
"lodash.defaults": "^4.2.0",
|
||||
"lodash.isarguments": "^3.1.0",
|
||||
"redis-errors": "^1.2.0",
|
||||
"redis-parser": "^3.0.0",
|
||||
"standard-as-callback": "^2.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@ioredis/interface-generator": "^1.3.0",
|
||||
"@semantic-release/changelog": "^6.0.1",
|
||||
"@semantic-release/commit-analyzer": "^9.0.2",
|
||||
"@semantic-release/git": "^10.0.1",
|
||||
"@types/chai": "^4.3.0",
|
||||
"@types/chai-as-promised": "^7.1.5",
|
||||
"@types/debug": "^4.1.5",
|
||||
"@types/lodash.defaults": "^4.2.7",
|
||||
"@types/lodash.isarguments": "^3.1.7",
|
||||
"@types/mocha": "^9.1.0",
|
||||
"@types/node": "^14.18.12",
|
||||
"@types/redis-errors": "^1.2.1",
|
||||
"@types/sinon": "^10.0.11",
|
||||
"@typescript-eslint/eslint-plugin": "^5.48.1",
|
||||
"@typescript-eslint/parser": "^5.48.1",
|
||||
"chai": "^4.3.6",
|
||||
"chai-as-promised": "^7.1.1",
|
||||
"eslint": "^8.31.0",
|
||||
"eslint-config-prettier": "^8.6.0",
|
||||
"mocha": "^9.2.1",
|
||||
"nyc": "^15.1.0",
|
||||
"prettier": "^2.6.1",
|
||||
"semantic-release": "^19.0.2",
|
||||
"server-destroy": "^1.0.1",
|
||||
"sinon": "^13.0.1",
|
||||
"ts-node": "^10.4.0",
|
||||
"tsd": "^0.19.1",
|
||||
"typedoc": "^0.22.18",
|
||||
"typescript": "^4.6.3",
|
||||
"uuid": "^9.0.0"
|
||||
},
|
||||
"nyc": {
|
||||
"reporter": [
|
||||
"lcov"
|
||||
]
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12.22.0"
|
||||
},
|
||||
"mocha": {
|
||||
"exit": true,
|
||||
"timeout": 8000,
|
||||
"recursive": true,
|
||||
"require": "ts-node/register"
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user